2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * Compile-time tuning knobs and error-exit macros for the CIL decode loop.
 * The *_FAILURE macros record an exception type (and, where applicable, a
 * message) on the MonoCompile and jump to the function-local
 * 'exception_exit' label, so they may only be expanded inside a function
 * that defines that label and has cfg/method/etc. in scope.
 */
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee; expanded only while cfg->method
 * differs from the method being decoded (i.e. during inlining). */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for static helpers defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Trampoline helper signatures; 'extern' means they are defined in
 * another translation unit of the JIT. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
/*
 * Per-opcode metadata built by expanding mini-ops.h with different
 * MINI_OP/MINI_OP3 definitions: first to record dest/src register kinds
 * (one char each), then to compute each opcode's source-register count.
 */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: number of source registers actually used. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Reset a variable's liveness bookkeeping; bid 0xffff presumably marks
 * "no use recorded yet" -- TODO confirm against the full source. */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
/* Copy all three source registers from regs[] into the instruction. */
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrappers over the internal virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven dreg. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *
 *   Map a MonoType to the move opcode used when copying a value of that
 * type between virtual registers.  Several case labels and return
 * statements are elided in this excerpt.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
/* Enums are moved as their underlying integral type. */
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances dispatch on the container class's byval type. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print a basic block's number, its incoming and
 * outgoing CFG edges (block number plus dfn), and every instruction
 * in its code list to stdout.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* Incoming edges. */
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* Outgoing edges. */
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Instruction list. */
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * address 'ip'; addresses outside the method body are rejected as
 * unverified code. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 OP_X86_LEA into the current bblock:
 * dest = sr1 + (sr2 shifted by 'shift') + imm, using LEA addressing. */
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend a 32-bit second operand so it can be
 * combined with a pointer-sized first operand; rewires (ins)->sreg2 to
 * the widened register.  A no-op on 32-bit targets (second definition). */
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * Eval-stack emission macros used by the main IL decode loop:
 *   ADD_BINOP(op)        - pop two stack values, type-check them via
 *                          type_from_op, widen if needed, emit the typed
 *                          binary op and push its (decomposed) result;
 *   ADD_UNOP(op)         - same for a unary op with one operand;
 *   ADD_BINCOND(next)    - emit an OP_COMPARE plus a conditional branch,
 *                          resolving the true target from 'target', the
 *                          false target from 'next' (or the following ip),
 *                          and flushing any live stack values first.
 * All of them rely on locals of mono_method_to_ir (cfg, ins, sp, bblock,
 * ip, target, stack_start, start_new_bblock) being in scope.
 */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * control flow passes to after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Verbose tracing of the new edge (entry/exit blocks have no cil_code). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* If the edge already exists, do not add a duplicate. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot; arrays live in the compile mempool,
 * so the old array is simply abandoned. */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the same growth for to->in_bb. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
/* Scan every exception clause; the region id packs (clause index + 1)
 * in the high bits with the region kind and clause flags in the low. */
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler: distinguish finally / fault / catch. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Return a GList of the exception clauses of the given 'type' whose
 * protected range contains 'ip' but not 'target' -- i.e. the clauses
 * (e.g. finally blocks) whose region a branch from ip to target exits.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* Branch leaves this clause's protected range? */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Get or lazily create the per-region local that holds the saved stack
 * pointer; cached in cfg->spvars keyed by region id.  MONO_INST_INDIRECT
 * keeps the variable out of the register allocator.
 */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for the handler at
 * 'offset'; cached in cfg->exvars, also kept stack-allocated. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* NOTE(review): this assignment appears to be the byref branch (byref
 * values are managed pointers on the eval stack) -- elided lines make
 * that implicit; confirm against the full source. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
/* Small integer types all widen to I4 on the eval stack. */
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying integral type. */
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
/* Shared generic params: treated as references under generic sharing. */
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of a numeric binary op, indexed by the two operand
 * stack types; STACK_INV marks an illegal combination. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation, indexed by operand stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of integer-only binary ops (and/or/xor etc.). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, non-zero values encode which
 * comparison kinds are verifiable for the operand-type pair. */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops: the shifted operand's type; shift count
 * must be I4 or pointer-sized. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CIL opcode to obtain the
 * typed IR opcode for that stack type (I4/I8/PTR/R8/...). */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Numeric binops: pick the result type from the table, then shift the
 * generic opcode to the type-specific one via binops_op_map. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts: result has the shifted operand's type. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: choose long/float/int compare from the first operand. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare opcodes. */
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned set-on-compare: only entries with bit 0 set are verifiable. */
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer to floating point. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to I4. */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
/* Conversions to native int / overflow-checked binops. */
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: the loaded value's stack type is fixed by the opcode. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers with no better class info default to object. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Legality table for matching an eval-stack type against a signature
 * parameter type (currently only referenced from commented-out code). */
970 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Verify that the argument instructions in 'args' are compatible with
 * the parameter types of 'sig' (byref-ness, reference kinds, float
 * widths).  Several case labels/returns are elided in this excerpt.
 */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
/* Managed pointers are only valid for byref parameters, and vice versa. */
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* Floating-point stack entries must map to R4/R8 parameters. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable (pointer-sized local). */
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on architectures that
 * define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used under generic sharing;
 * MONO_INST_INDIRECT forces it onto the stack. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType (using ins->klass for
 * managed pointers and value types). */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: map a MonoType to its eval-stack type (several
 * case labels/returns are elided in this excerpt). */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Map a ldelem/stelem CIL opcode to the MonoClass of the element type
 * it accesses (most case labels are elided in this excerpt).
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one interface variable per (stack slot, stack type) pair. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
/* Reuse a previously created variable for this slot/type if any. */
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
/* Fallback: uncached variable for types not covered above. */
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Under AOT, remember the image/token pair that produced 'key' so the
 * AOT compiler can later resolve the reference from metadata alone.
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables will carry the live
 * stack values across the edge. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's existing in_stack, so both sides of the
 * join agree on the variables. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh (or shared) variables. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of each successor; a depth
 * mismatch at a join point makes the method unverifiable. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
/* Spill each live stack entry to its variable and replace the stack
 * slot with the variable, so the successor reloads from it. */
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
/* Store the same values again into this successor's variables. */
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into vreg INTF_REG the interface-offsets entry of KLASS for the
 * vtable held in VTABLE_REG.  The offsets array is laid out at negative
 * offsets in front of the vtable (see the original comment below).
 */
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: the interface id is not known until runtime, so materialize it as an
 * ADJUSTED_IID patch constant and use it to index into the table. */
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant; load straight from the
 * fixed negative offset relative to the vtable pointer. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which leaves a nonzero value in INTF_BIT_REG iff bit
 * klass->interface_id is set in the interface bitmap located at
 * [BASE_REG + OFFSET].  Three code paths are visible below:
 * - COMPRESSED_INTERFACE_BITMAP: delegate the test to the
 *   mono_class_interface_match icall;
 * - AOT: the iid is a patch constant, so compute byte index (iid >> 3)
 *   and bit mask (1 << (iid & 7)) at runtime;
 * - JIT: iid is known, so load the byte and AND with a constant mask.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit within byte = 1 << (iid & 7) */
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both byte offset and bit mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check () using the bitmap
 * stored in MonoClass.interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check () using the bitmap
 * stored in MonoVTable.interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id (16-bit unsigned) and delegates the
 * range check to mini_emit_max_iid_check (). */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id (16-bit unsigned) and delegates the
 * range check to mini_emit_max_iid_check (). */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an "is instance of" test based on the supertypes table: the
 * candidate class in KLASS_REG is a subtype of KLASS iff its
 * supertypes [klass->idepth - 1] entry equals KLASS.  Branches to
 * TRUE_TARGET on success; the idepth guard branches to FALSE_TARGET
 * when the candidate's hierarchy is too shallow.  KLASS_INST, when
 * non-NULL, supplies KLASS as a runtime value (generic sharing);
 * otherwise an AOT class constant or an immediate is used.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Beyond the default supertable size the table may be shorter than
 * klass->idepth, so check the candidate's idepth first. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with KLASS as a compile-time class
 * (no runtime klass instruction). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast test against the vtable in VTABLE_REG:
 * first the max-iid range check, then the interface bitmap bit test.
 * NOTE(review): both a branch to TRUE_TARGET and an InvalidCastException
 * emission appear below; the conditional choosing between them (presumably
 * on true_target/false_target being NULL) is elided in this excerpt.
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same structure as mini_emit_iface_cast () but reads max_interface_id and
 * the interface bitmap from a MonoClass instead of a MonoVTable.
 * NOTE(review): the conditional selecting branch vs. exception is elided
 * in this excerpt, as in mini_emit_iface_cast. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class check: compare the class in KLASS_REG against
 * KLASS and raise InvalidCastException on mismatch.  KLASS is supplied
 * as a runtime value via KLASS_INST (generic sharing), as an AOT class
 * constant, or as an immediate.
 * NOTE(review): the opening `if (klass_inst)` guard before the first
 * compare is elided in this excerpt.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class check with KLASS known at compile time (no runtime klass
 * instruction). */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in KLASS_REG against KLASS and branch to TARGET
 * using BRANCH_OP; unlike mini_emit_class_check () no exception is raised.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check for the object in OBJ_REG whose class is in
 * KLASS_REG, raising InvalidCastException on failure.  Two shapes are
 * visible: an array path (rank check, element-class check with special
 * cases for enums/object/interfaces, recursion for arrays of arrays,
 * plus a bounds == NULL check for SZARRAY vectors) and a plain class
 * path using the supertypes table.  OBJECT_IS_NULL is the block jumped
 * to when a sub-check determines the cast trivially succeeds/ends.
 * NOTE(review): the `if (klass->rank)` guard opening the array path is
 * elided in this excerpt.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes where enum/valuetype identity rules differ
 * from plain reference compatibility. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* A vector (SZARRAY) must have a NULL bounds pointer; a multi-dim array
 * of the same rank/element type is not castable to it. */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, mirroring
 * mini_emit_isninst_cast_inst () but raising on failure. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass with KLASS known at compile time (no runtime klass
 * instruction); see mini_emit_castclass_inst (). */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inlined stores which set SIZE bytes at [DESTREG + OFFSET] to VAL
 * (asserted to be 0 below).  Small aligned sizes use a single immediate
 * store of the matching width; larger sizes loop over register-width,
 * then 4/2/1-byte stores, widest first, respecting ALIGN.
 * NOTE(review): the loop headers consuming `size` between the store
 * groups are elided in this excerpt.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a single immediate store covers the whole region. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Otherwise materialize VAL in a register and emit a store sequence. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if necessary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned region: use the widest stores the target allows. */
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit an inlined copy of SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], using load/store pairs of decreasing width
 * (8/4/2/1 bytes) subject to ALIGN and the target's unaligned-access
 * capability.  The size is asserted bounded to avoid unbounded code
 * expansion.
 * NOTE(review): the loop headers consuming `size` between the copy
 * groups are elided in this excerpt.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if necessary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned region: widest transfers first. */
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map the return type TYPE to the matching call opcode, selecting the
 * _REG variant when CALLI is set or the VIRT variant when VIRT is set.
 * Generic-shared types are first resolved through
 * mini_get_basic_type_from_generic ().  Enums recurse through their
 * base type; generic instances recurse through the container class.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying integral type. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
 * @target: the type of the storage location (local, field, etc.)
 * @arg: the evaluation-stack item being stored
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Byref targets accept managed pointers (with class match) and, per the
 * elided branch, unmanaged pointers. */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Value types must match exactly: stack kind AND class identity. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* The `this' argument (args [0], when sig->hasthis — guard elided) must
 * be an object, managed pointer or unmanaged pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding direct-call opcode
 * (used when a virtual call is devirtualized); aborts on unknown input.
 */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * i.e. a call whose target address is loaded from memory (vtable slot);
 * aborts on unknown input.
 */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit the hidden IMT argument for an interface call: either the value
 * in IMT_ARG, or the call's MonoMethod (as AOT const or OP_PCONST).
 * Under LLVM the register is recorded in call->imt_arg_reg; otherwise it
 * is bound to MONO_ARCH_IMT_REG, with an arch-specific fallback when no
 * dedicated IMT register exists.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same method-reg materialization, then bind to the
 * dedicated IMT register or defer to the architecture backend. */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from the mempool MP and initialize it with
 * IP, TYPE and TARGET.  Lifetime is tied to the mempool; no free needed.
 */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Create and initialize a MonoCallInst for SIG with arguments ARGS.
 * CALLI selects an indirect call, VIRTUAL a virtual one, TAIL an
 * OP_TAILCALL.  Handles valuetype returns (via cfg->vret_addr or an
 * OP_OUTARG_VTRETADDR temp), soft-float r8->r4 argument conversion,
 * and lowers the call through LLVM or the arch backend.  The call is
 * NOT added to a bblock here; callers do that.
 * NOTE(review): several guards (tail-call condition, vret_addr test)
 * are elided in this excerpt.
 */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp and
 * reference it through OP_OUTARG_VTRETADDR (see comment below). */
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Lower the call through LLVM or the architecture backend. */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the address in ADDR with signature SIG,
 * adding it to the current bblock and returning the call instruction.
 */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 *
 *   Attach the runtime-generic-context argument in RGCTX_REG to CALL:
 * bound to the dedicated MONO_ARCH_RGCTX_REG when the arch defines one,
 * otherwise recorded in call->rgctx_arg_reg (LLVM-style) for later use.
 */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli () but additionally passes the rgctx argument
 * RGCTX_ARG.  The rgctx value is moved into a fresh vreg before the call
 * so it survives argument setup.
 */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS
 * non-NULL makes the call virtual; IMT_ARG, when set, carries the hidden
 * interface-method-table argument.  Handles, in order: string ctors
 * (return type fixup), transparent-proxy remoting (wrapper or rgctx
 * calli under generic sharing), the delegate Invoke fast path, direct
 * dispatch for non-virtual/sealed methods, and finally a vtable- or
 * IMT-slot-based virtual call.  Returns the call instruction.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A call may go through a transparent proxy when the receiver is a
 * MarshalByRef (or object) instance and the check can't be elided. */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with implicit null check) and
 * call through an IMT slot (interfaces) or a vtable slot. */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full () but additionally passes the
 * vtable/rgctx argument VTABLE_ARG, moved into a fresh vreg and attached
 * via set_rgctx_arg ().
 */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: method call using the method's own signature and
 * no IMT argument. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG,
 * add it to the current bblock and return the call instruction.
 */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by FUNC, going
 * through its wrapper and registered signature.
 */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Record JI in cfg->abs_patches so the patch resolver can look it up by
 * the (fake) address later. */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee may
 * return a small integer with uninitialized upper bits (pinvoke targets,
 * or when LLVM is enabled).  No widening is emitted for void or byref
 * returns.  The widening opcode is chosen from the load opcode the return
 * type would use (signed/unsigned 1- and 2-byte cases).
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the managed String.memcpy helper
 * (3 arguments) from corlib; aborts via g_error () if the installed
 * corlib is too old to have it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Accumulate into *WB_BITMAP a per-pointer-word bitmap of the reference
 * fields of KLASS: bit N is set when the word at OFFSET + N words holds a
 * reference.  Static fields are skipped; for valuetypes the MonoObject
 * header size is subtracted from field offsets.  Recurses into value-type
 * fields whose class has references (nested value types — see FIXME below).
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned for the word-granular bitmap to work. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or of the vreg VALUE_REG)
 * through PTR.  When the GC exposes a card table, the card byte covering
 * PTR is marked inline (shift, add, store 1); otherwise the managed write
 * barrier method returned by mono_gc_get_write_barrier () is called.
 * Dummy uses keep PTR and VALUE alive past the barrier.
 * NOTE(review): lines are sampled here — the exact branch structure between
 * the card-table path and the call path is not fully visible.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2597 MonoInst *dummy_use;
2599 guint8 *card_table = mono_gc_get_card_table (&shift_bits);
2602 int offset_reg = alloc_preg (cfg);
2603 int card_table_reg = alloc_preg (cfg);
/* card = card_table + (ptr >> shift_bits); *card = 1 */
2605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, shift_bits);
2606 MONO_EMIT_NEW_PCONST (cfg, card_table_reg, card_table);
2607 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, card_table_reg, card_table_reg, offset_reg);
2608 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, card_table_reg, 0, 1);
2610 EMIT_NEW_DUMMY_USE (cfg, dummy_use, ptr);
2612 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2613 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2617 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2619 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2620 dummy_use->sreg1 = value_reg;
2621 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a value of type
 * KLASS from iargs [1] to iargs [0] (SIZE bytes, alignment ALIGN).
 * Bails out — presumably returning FALSE so the caller falls back, the
 * return statements are on sampled-out lines — when ALIGN is below
 * pointer size or SIZE needs more than the 32 bits of the wb bitmap.
 * Copies larger than 5 pointer words go through the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled;
 * smaller ones are unrolled word by word with a write barrier per
 * reference word, followed by 4/2/1-byte tail copies.
 */
2627 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2629 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2630 unsigned need_wb = 0;
2635 /*types with references can't have alignment smaller than sizeof(void*) */
2636 if (align < SIZEOF_VOID_P)
2639 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2640 if (size > 32 * SIZEOF_VOID_P)
2643 create_write_barrier_bitmap (klass, &need_wb, 0);
2645 /* We don't unroll more than 5 stores to avoid code bloat. */
2646 if (size > 5 * SIZEOF_VOID_P) {
2647 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2648 size += (SIZEOF_VOID_P - 1);
2649 size &= ~(SIZEOF_VOID_P - 1);
2651 EMIT_NEW_ICONST (cfg, iargs [2], size);
2652 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2653 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2657 destreg = iargs [0]->dreg;
2658 srcreg = iargs [1]->dreg;
2661 dest_ptr_reg = alloc_preg (cfg);
2662 tmp_reg = alloc_preg (cfg);
/* Running destination pointer for the unrolled word copy. */
2665 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2667 while (size >= SIZEOF_VOID_P) {
2668 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2672 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2674 offset += SIZEOF_VOID_P;
2675 size -= SIZEOF_VOID_P;
2678 /*tmp += sizeof (void*)*/
2679 if (size >= SIZEOF_VOID_P) {
2680 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2681 MONO_ADD_INS (cfg->cbb, iargs [0]);
2685 /* Those cannot be references since size < sizeof (void*) */
2687 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2694 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2695 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2701 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2702 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2711 * Emit code to copy a valuetype of type @klass whose address is stored in
2712 * @src->dreg to memory whose address is stored at @dest->dreg.
 * When write barriers are enabled, KLASS has references and the copy is
 * not a native/marshalled layout, the copy goes through either the
 * inlined wb-aware memcpy (under MONO_OPT_INTRINS) or the
 * mono_value_copy icall; stores to the stack frame skip the barrier.
 * Otherwise small copies are inlined via mini_emit_memcpy () and large
 * ones call the managed memcpy helper.
2715 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2717 MonoInst *iargs [4];
2720 MonoMethod *memcpy_method;
2724 * This check breaks with spilled vars... need to handle it during verification anyway.
2725 * g_assert (klass && klass == src->klass && klass == dest->klass);
2729 n = mono_class_native_size (klass, &align);
2731 n = mono_class_value_size (klass, &align);
2733 /* if native is true there should be no references in the struct */
2734 if (cfg->gen_write_barriers && klass->has_references && !native) {
2735 /* Avoid barriers when storing to the stack */
2736 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2737 (dest->opcode == OP_LDADDR))) {
2738 int context_used = 0;
2743 if (cfg->generic_sharing_context)
2744 context_used = mono_class_check_context_used (klass);
2746 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2747 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2749 } else if (context_used) {
/* Shared generic code: load the class from the rgctx at runtime. */
2750 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2752 if (cfg->compile_aot) {
2753 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2755 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2756 mono_class_compute_gc_descriptor (klass);
2760 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2765 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2766 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2767 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2771 EMIT_NEW_ICONST (cfg, iargs [2], n);
2773 memcpy_method = get_memcpy_method ();
2774 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the managed String.memset helper
 * (3 arguments) from corlib; aborts via g_error () if the installed
 * corlib is too old to have it.
 */
2779 get_memset_method (void)
2781 static MonoMethod *memset_method = NULL;
2782 if (!memset_method) {
2783 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2785 g_error ("Old corlib found. Install a new one");
2787 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a value of type KLASS at the address in
 * DEST->dreg.  Values up to 5 pointer words are zeroed inline with
 * mini_emit_memset (); larger ones call the managed memset helper with
 * (dest, 0, size).
 */
2791 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2793 MonoInst *iargs [3];
2796 MonoMethod *memset_method;
2798 /* FIXME: Optimize this for the case when dest is an LDADDR */
2800 mono_class_init (klass);
2801 n = mono_class_value_size (klass, &align);
2803 if (n <= sizeof (gpointer) * 5) {
2804 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2807 memset_method = get_memset_method ();
2809 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2810 EMIT_NEW_ICONST (cfg, iargs [2], n);
2811 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Only valid
 * under generic sharing (asserted).  Three cases, keyed on CONTEXT_USED
 * and the method's shape:
 *   - method context used: load the MRGCTX from the vtable var;
 *   - static method or valuetype class: load the vtable from the vtable
 *     var, indirecting through MonoMethodRuntimeGenericContext when the
 *     method is inflated with a method instantiation;
 *   - otherwise: load the vtable from `this' (argument 0).
 * NOTE(review): the returns and some connective lines are on sampled-out
 * lines; comments cover only the visible loads.
 */
2816 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2818 MonoInst *this = NULL;
2820 g_assert (cfg->generic_sharing_context);
2822 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2823 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2824 !method->klass->valuetype)
2825 EMIT_NEW_ARGLOAD (cfg, this, 0);
2827 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2828 MonoInst *mrgctx_loc, *mrgctx_var;
2831 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2833 mrgctx_loc = mono_get_vtable_var (cfg);
2834 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2837 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2838 MonoInst *vtable_loc, *vtable_var;
2842 vtable_loc = mono_get_vtable_var (cfg);
2843 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2845 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The vtable var actually holds an MRGCTX here; fetch the vtable out of it. */
2846 MonoInst *mrgctx_var = vtable_var;
2849 vtable_reg = alloc_preg (cfg);
2850 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2851 vtable_var->type = STACK_PTR;
2857 int vtable_reg, res_reg;
2859 vtable_reg = alloc_preg (cfg);
2860 res_reg = alloc_preg (cfg);
2861 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) a MonoJumpInfoRgctxEntry describing an rgctx slot
 * for METHOD: IN_MRGCTX selects method vs class context, the embedded
 * MonoJumpInfo carries PATCH_TYPE/PATCH_DATA, and INFO_TYPE is the kind
 * of information stored in the slot.
 */
2866 static MonoJumpInfoRgctxEntry *
2867 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2869 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2870 res->method = method;
2871 res->in_mrgctx = in_mrgctx;
2872 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2873 res->data->type = patch_type;
2874 res->data->data.target = patch_data;
2875 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline which resolves ENTRY
 * against the runtime generic context in RGCTX.
 */
2880 static inline MonoInst*
2881 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2883 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the information of kind RGCTX_TYPE about KLASS from
 * the runtime generic context: build the rgctx entry, load the rgctx for
 * the current method, then fetch the slot lazily.
 */
2887 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2888 MonoClass *klass, int rgctx_type)
2890 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2891 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2893 return emit_rgctx_fetch (cfg, rgctx, entry);
2897 * emit_get_rgctx_method:
2899 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2900 * normal constants, else emit a load from the rgctx.
 * In the constant case only MONO_RGCTX_INFO_METHOD and
 * MONO_RGCTX_INFO_METHOD_RGCTX are supported (anything else asserts).
2903 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2904 MonoMethod *cmethod, int rgctx_type)
2906 if (!context_used) {
2909 switch (rgctx_type) {
2910 case MONO_RGCTX_INFO_METHOD:
2911 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2913 case MONO_RGCTX_INFO_METHOD_RGCTX:
2914 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2917 g_assert_not_reached ();
2920 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2921 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2923 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the information of kind RGCTX_TYPE about FIELD from
 * the runtime generic context, mirroring emit_get_rgctx_klass () but with
 * a MONO_PATCH_INFO_FIELD entry.
 */
2928 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2929 MonoClassField *field, int rgctx_type)
2931 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2932 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2934 return emit_rgctx_fetch (cfg, rgctx, entry);
2938 * On return the caller must check @klass for load errors.
 * Emits a call to the generic class init trampoline for KLASS: the
 * vtable argument comes from the rgctx under generic sharing, or is a
 * vtable constant otherwise.  The LLVM backend uses a different
 * trampoline signature; when the architecture has a dedicated vtable
 * register the argument is pinned to it.
2941 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2943 MonoInst *vtable_arg;
2945 int context_used = 0;
2947 if (cfg->generic_sharing_context)
2948 context_used = mono_class_check_context_used (klass);
2951 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2952 klass, MONO_RGCTX_INFO_VTABLE);
2954 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2958 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2961 if (COMPILE_LLVM (cfg))
2962 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2964 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2965 #ifdef MONO_ARCH_VTABLE_REG
2966 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2967 cfg->uses_vtable_reg = TRUE;
2974 * On return the caller must check @array_class for load errors
 * Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  The object's vtable is loaded
 * with a null-check fault; the comparison is done against the class
 * (MONO_OPT_SHARED), an rgctx-provided vtable (shared generics), a
 * vtable constant (AOT) or the vtable address directly (JIT).
2977 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2979 int vtable_reg = alloc_preg (cfg);
2980 int context_used = 0;
2982 if (cfg->generic_sharing_context)
2983 context_used = mono_class_check_context_used (array_class);
2985 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2987 if (cfg->opt & MONO_OPT_SHARED) {
2988 int class_reg = alloc_preg (cfg);
2989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2990 if (cfg->compile_aot) {
2991 int klass_reg = alloc_preg (cfg);
2992 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2993 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2995 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2997 } else if (context_used) {
2998 MonoInst *vtable_ins;
3000 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3001 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3003 if (cfg->compile_aot) {
3007 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3009 vt_reg = alloc_preg (cfg);
3010 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3011 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3014 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3020 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code which records the source class
 * (read from OBJ_REG's vtable) and the target KLASS into the per-thread
 * MonoJitTlsData (class_cast_from / class_cast_to) so a failed cast can
 * produce a detailed message.  No-op otherwise; prints an error if the
 * platform lacks the JIT TLS intrinsic.
 */
3024 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3026 if (mini_get_debug_options ()->better_cast_details) {
3027 int to_klass_reg = alloc_preg (cfg);
3028 int vtable_reg = alloc_preg (cfg);
3029 int klass_reg = alloc_preg (cfg);
3030 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3033 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3037 MONO_ADD_INS (cfg->cbb, tls_get);
3038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3039 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3041 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3042 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): when --debug=casts is active,
 * clear the saved cast details in the JIT TLS after a cast succeeds.
 */
3048 reset_cast_details (MonoCompile *cfg)
3050 /* Reset the variables holding the cast details */
3051 if (mini_get_debug_options ()->better_cast_details) {
3052 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3054 MONO_ADD_INS (cfg->cbb, tls_get);
3055 /* It is enough to reset the from field */
3056 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3061 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3062 * generic code is generated.
 * Delegates to the managed Nullable<T>.Unbox (1 arg) method; under
 * generic sharing both the method's code address and the rgctx come
 * from the runtime generic context and an indirect rgctx call is made.
3065 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3067 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3070 MonoInst *rgctx, *addr;
3072 /* FIXME: What if the class is shared? We might not
3073 have to get the address of the method from the
3075 addr = emit_get_rgctx_method (cfg, context_used, method,
3076 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3078 rgctx = emit_get_rgctx (cfg, method, context_used);
3080 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3082 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp [0] to a value of type KLASS: check rank == 0 and
 * that the boxed object's element class matches KLASS->element_class
 * (via an rgctx load under generic sharing, or an inline class check
 * otherwise), throwing InvalidCastException on mismatch, then compute
 * the address of the value as obj + sizeof (MonoObject).
 */
3087 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3091 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3092 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3093 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3094 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3096 obj_reg = sp [0]->dreg;
3097 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3098 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3100 /* FIXME: generics */
3101 g_assert (klass->rank == 0);
/* Arrays can not be unboxed: object's rank must be zero. */
3104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3105 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3107 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3111 MonoInst *element_class;
3113 /* This assertion is from the unboxcast insn */
3114 g_assert (klass->rank == 0);
3116 element_class = emit_get_rgctx_klass (cfg, context_used,
3117 klass->element_class, MONO_RGCTX_INFO_KLASS);
3119 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3120 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3122 save_cast_details (cfg, klass->element_class, obj_reg);
3123 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3124 reset_cast_details (cfg);
/* The value lives right after the object header. */
3127 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3128 MONO_ADD_INS (cfg->cbb, add);
3129 add->type = STACK_MP;
3136 * Returns NULL and set the cfg exception on error.
 * Emit allocation of an object of type KLASS (FOR_BOX when the result
 * will be used by box).  Paths: shared generics (class/vtable via the
 * rgctx, then mono_object_new or mono_object_new_specific), the
 * MONO_OPT_SHARED domain-passing path, a compact AOT helper for
 * out-of-line corlib allocations, the GC's managed allocator when one is
 * available, and finally the allocation function chosen by
 * mono_class_get_allocation_ftn () (which may want the instance size in
 * pointer words passed first).  A failed vtable lookup sets a TYPE_LOAD
 * exception on the cfg.
3139 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3141 MonoInst *iargs [2];
3147 MonoInst *iargs [2];
3150 FIXME: we cannot get managed_alloc here because we can't get
3151 the class's vtable (because it's not a closed class)
3153 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3154 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3157 if (cfg->opt & MONO_OPT_SHARED)
3158 rgctx_info = MONO_RGCTX_INFO_KLASS;
3160 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3161 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3163 if (cfg->opt & MONO_OPT_SHARED) {
3164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3166 alloc_ftn = mono_object_new;
3169 alloc_ftn = mono_object_new_specific;
3172 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3175 if (cfg->opt & MONO_OPT_SHARED) {
3176 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3177 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3179 alloc_ftn = mono_object_new;
3180 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3181 /* This happens often in argument checking code, eg. throw new FooException... */
3182 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3183 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3184 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3186 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3187 MonoMethod *managed_alloc = NULL;
3191 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3192 cfg->exception_ptr = klass;
3196 #ifndef MONO_CROSS_COMPILE
3197 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3200 if (managed_alloc) {
3201 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3202 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3204 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3206 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to whole pointer words. */
3207 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3208 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3209 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3212 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3216 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3220 * Returns NULL and set the cfg exception on error.
 * Box VAL into a new object of type KLASS.  Nullable<T> boxing goes
 * through the managed Nullable<T>.Box method (via an rgctx indirect call
 * under generic sharing); everything else allocates via handle_alloc ()
 * and stores the value right after the object header.
3223 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3225 MonoInst *alloc, *ins;
3227 if (mono_class_is_nullable (klass)) {
3228 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3231 /* FIXME: What if the class is shared? We might not
3232 have to get the method address from the RGCTX. */
3233 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3234 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3235 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3237 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3239 return mono_emit_method_call (cfg, method, &val, NULL);
3243 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3247 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3252 // FIXME: This doesn't work yet (class libs tests fail?)
// Decide whether an isinst/castclass against KLASS needs the icall slow path
// (interfaces, arrays, nullables, MBR/sealed classes, variant generics, gparams).
// NOTE(review): the leading TRUE || forces the complex path for ALL classes
// until the FIXME above is resolved — the rest of the condition is dead.
3253 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3256 * Returns NULL and set the cfg exception on error.
 * Emit a castclass of SRC to KLASS.  Complex classes (see
 * is_complex_isinst — currently all of them) go through the
 * mono_object_castclass icall; the inline path null-checks, records cast
 * details for --debug=casts, then checks the vtable/class chain and
 * throws InvalidCastException on mismatch.
3259 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3261 MonoBasicBlock *is_null_bb;
3262 int obj_reg = src->dreg;
3263 int vtable_reg = alloc_preg (cfg);
3264 MonoInst *klass_inst = NULL;
3269 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3270 klass, MONO_RGCTX_INFO_KLASS);
3272 if (is_complex_isinst (klass)) {
3273 /* Complex case, handle by an icall */
3279 args [1] = klass_inst;
3281 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3283 /* Simple case, handled by the code below */
3287 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes a castclass */
3289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3290 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3292 save_cast_details (cfg, klass, obj_reg);
3294 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3296 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3298 int klass_reg = alloc_preg (cfg);
3300 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3302 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3303 /* the remoting code is broken, access the class for now */
3304 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3305 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3307 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3308 cfg->exception_ptr = klass;
3311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3313 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3316 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3318 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3319 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3323 MONO_START_BB (cfg, is_null_bb);
3325 reset_cast_details (cfg);
3331 * Returns NULL and set the cfg exception on error.
 * Emit an isinst of SRC against KLASS, producing the object on success
 * and NULL on failure.  Complex classes (see is_complex_isinst —
 * currently all of them) go through the mono_object_isinst icall.  The
 * inline path pre-copies the input to the result register (so the
 * is_null_bb target is a plain merge), branches on the class kind:
 * interface, array (with special cases for enum/object cast classes and
 * an SZARRAY bounds check), nullable, sealed class, or generic subtype
 * walk — false_bb zeroes the result.
3334 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3337 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3338 int obj_reg = src->dreg;
3339 int vtable_reg = alloc_preg (cfg);
3340 int res_reg = alloc_preg (cfg);
3341 MonoInst *klass_inst = NULL;
3344 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3346 if (is_complex_isinst (klass)) {
3349 /* Complex case, handle by an icall */
3355 args [1] = klass_inst;
3357 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3359 /* Simple case, the code below can handle it */
3363 NEW_BBLOCK (cfg, is_null_bb);
3364 NEW_BBLOCK (cfg, false_bb);
3365 NEW_BBLOCK (cfg, end_bb);
3367 /* Do the assignment at the beginning, so the other assignment can be if converted */
3368 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3369 ins->type = STACK_OBJ;
/* isinst (null) yields null */
3372 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3373 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3377 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3378 g_assert (!context_used);
3379 /* the is_null_bb target simply copies the input register to the output */
3380 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3382 int klass_reg = alloc_preg (cfg);
3385 int rank_reg = alloc_preg (cfg);
3386 int eclass_reg = alloc_preg (cfg);
3388 g_assert (!context_used);
/* Array case: rank must match, then the element (cast) classes are compared. */
3389 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3394 if (klass->cast_class == mono_defaults.object_class) {
3395 int parent_reg = alloc_preg (cfg);
3396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3397 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3398 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3400 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3401 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3402 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3404 } else if (klass->cast_class == mono_defaults.enum_class) {
3405 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3407 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3408 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3410 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3411 /* Check that the object is a vector too */
3412 int bounds_reg = alloc_preg (cfg);
3413 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3415 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3418 /* the is_null_bb target simply copies the input register to the output */
3419 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3421 } else if (mono_class_is_nullable (klass)) {
3422 g_assert (!context_used);
3423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3424 /* the is_null_bb target simply copies the input register to the output */
3425 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3427 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3428 g_assert (!context_used);
3429 /* the remoting code is broken, access the class for now */
3430 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3431 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3433 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3434 cfg->exception_ptr = klass;
3437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3446 /* the is_null_bb target simply copies the input register to the output */
3447 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3452 MONO_START_BB (cfg, false_bb);
/* Not an instance: result is null. */
3454 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3457 MONO_START_BB (cfg, is_null_bb);
3459 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Implement the CEE_MONO_CISINST opcode (remoting-aware isinst): see the
 * result encoding in the comment below.  Interfaces and non-interfaces
 * both fall back to inspecting transparent-proxy state
 * (custom_type_info / remote_class->proxy_class) before deciding between
 * 0 (is instance), 1 (is not) and 2 (proxy of undeterminable type).
 */
3465 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3467 /* This opcode takes as input an object reference and a class, and returns:
3468 0) if the object is an instance of the class,
3469 1) if the object is not instance of the class,
3470 2) if the object is a proxy whose type cannot be determined */
3473 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3474 int obj_reg = src->dreg;
3475 int dreg = alloc_ireg (cfg);
3477 int klass_reg = alloc_preg (cfg);
3479 NEW_BBLOCK (cfg, true_bb);
3480 NEW_BBLOCK (cfg, false_bb);
3481 NEW_BBLOCK (cfg, false2_bb);
3482 NEW_BBLOCK (cfg, end_bb);
3483 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3485 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3488 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3489 NEW_BBLOCK (cfg, interface_fail_bb);
3491 tmp_reg = alloc_preg (cfg);
3492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3493 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3494 MONO_START_BB (cfg, interface_fail_bb);
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still answer "2". */
3497 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3499 tmp_reg = alloc_preg (cfg);
3500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3504 tmp_reg = alloc_preg (cfg);
3505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3506 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: for proxies test against the remote proxy_class instead. */
3508 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3509 tmp_reg = alloc_preg (cfg);
3510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3511 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3513 tmp_reg = alloc_preg (cfg);
3514 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3516 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3518 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3521 MONO_START_BB (cfg, no_proxy_bb);
3523 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3526 MONO_START_BB (cfg, false_bb);
3528 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3531 MONO_START_BB (cfg, false2_bb);
3533 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3534 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3536 MONO_START_BB (cfg, true_bb);
3538 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3540 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction on the stack. */
3543 MONO_INST_NEW (cfg, ins, OP_ICONST);
3545 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a castclass with transparent-proxy (remoting) support.
 * Takes the object reference SRC and the target class KLASS, and produces
 * an integer result: 0 when the object is an instance of KLASS (or null),
 * 1 when it is a proxy whose type cannot be determined here; otherwise the
 * emitted code throws InvalidCastException at runtime.
 * NOTE(review): the function header and several one-line bodies (braces,
 * returns) fall on lines elided from this excerpt; code lines are verbatim.
 */
3551 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3553 /* This opcode takes as input an object reference and a class, and returns:
3554 0) if the object is an instance of the class,
3555 1) if the object is a proxy whose type cannot be determined
3556 an InvalidCastException exception is thrown otherwise */
3559 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3560 int obj_reg = src->dreg;
3561 int dreg = alloc_ireg (cfg);
3562 int tmp_reg = alloc_preg (cfg);
3563 int klass_reg = alloc_preg (cfg);
3565 NEW_BBLOCK (cfg, end_bb);
3566 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record the target class so a failing cast can produce a good message. */
3571 save_cast_details (cfg, klass, obj_reg);
3573 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3574 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: check the vtable's interface bitmap/IMT. */
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3577 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3578 MONO_START_BB (cfg, interface_fail_bb);
3579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy either -> this check throws InvalidCastException. */
3581 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3583 tmp_reg = alloc_preg (cfg);
3584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3586 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: type cannot be decided at JIT time, result 1. */
3588 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3592 NEW_BBLOCK (cfg, no_proxy_bb);
3594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3596 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a transparent proxy: test against the remote (proxied) class. */
3598 tmp_reg = alloc_preg (cfg);
3599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3602 tmp_reg = alloc_preg (cfg);
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3607 NEW_BBLOCK (cfg, fail_1_bb);
3609 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3611 MONO_START_BB (cfg, fail_1_bb);
/* Proxy of undeterminable type: result 1 instead of throwing. */
3613 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3616 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: normal castclass semantics (throws on mismatch). */
3618 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3621 MONO_START_BB (cfg, ok_result_bb);
3623 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3625 MONO_START_BB (cfg, end_bb);
/* Materialize the result value as an I4 instruction. */
3628 MONO_INST_NEW (cfg, ins, OP_ICONST);
3630 ins->type = STACK_I4;
3636 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR for constructing a delegate of class KLASS wrapping
 * METHOD, with TARGET as the delegate's 'this' (possibly a null constant).
 * Per the comment above this function in the file, it returns NULL and sets
 * the cfg exception on error.
 * NOTE(review): the declarations of 'domain' and 'code_slot' used below are
 * on lines elided from this excerpt.
 */
3638 static G_GNUC_UNUSED MonoInst*
3639 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3641 gpointer *trampoline;
3642 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3646 obj = handle_alloc (cfg, klass, FALSE, 0);
3650 /* Inline the contents of mono_delegate_ctor */
3652 /* Set target field */
3653 /* Optimize away setting of NULL target */
3654 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3657 /* Set method field */
3658 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3662 * To avoid looking up the compiled code belonging to the target method
3663 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3664 * store it, and we fill it after the method has been compiled.
3666 if (!cfg->compile_aot && !method->dynamic) {
3667 MonoInst *code_slot_ins;
3670 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot hash. */
3672 domain = mono_domain_get ();
3673 mono_domain_lock (domain);
3674 if (!domain_jit_info (domain)->method_code_hash)
3675 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3676 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
/* Missing entry: allocate a zeroed pointer-sized slot in domain memory. */
3678 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3679 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3681 mono_domain_unlock (domain);
3683 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3688 /* Set invoke_impl field */
3689 if (cfg->compile_aot) {
/* Under AOT the trampoline address is resolved via a patch at load time. */
3690 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3692 trampoline = mono_create_delegate_trampoline (klass);
3693 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3695 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3697 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3703 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3705 MonoJitICallInfo *info;
3707 /* Need to register the icall so it gets an icall wrapper */
3708 info = mono_get_array_new_va_icall (rank);
3710 cfg->flags |= MONO_CFG_HAS_VARARGS;
3712 /* mono_array_new_va () needs a vararg calling convention */
3713 cfg->disable_llvm = TRUE;
3715 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3716 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3720 mono_emit_load_got_addr (MonoCompile *cfg)
3722 MonoInst *getaddr, *dummy_use;
3724 if (!cfg->got_var || cfg->got_var_allocated)
3727 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3728 getaddr->dreg = cfg->got_var->dreg;
3730 /* Add it to the start of the first bblock */
3731 if (cfg->bb_entry->code) {
3732 getaddr->next = cfg->bb_entry->code;
3733 cfg->bb_entry->code = getaddr;
3736 MONO_ADD_INS (cfg->bb_entry, getaddr);
3738 cfg->got_var_allocated = TRUE;
3741 * Add a dummy use to keep the got_var alive, since real uses might
3742 * only be generated by the back ends.
3743 * Add it to end_bblock, so the variable's lifetime covers the whole
3745 * It would be better to make the usage of the got var explicit in all
3746 * cases when the backend needs it (i.e. calls, throw etc.), so this
3747 * wouldn't be needed.
3749 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3750 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL size (bytes) a method may have to be inlined; taken from the
 * MONO_INLINELIMIT env var, falling back to INLINE_LENGTH_LIMIT. */
3753 static int inline_limit;
/* TRUE once inline_limit has been initialized on first use. */
3754 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Rejects shared generic code, deep inline nesting, noinline/
 * synchronized/marshalbyref methods, bodies over the MONO_INLINELIMIT size,
 * classes whose cctor cannot be dealt with, methods with declarative
 * security, and (on soft-float targets) R4 parameters/returns.
 * NOTE(review): the one-line bodies of most if's below (return TRUE/FALSE)
 * fall on lines elided from this excerpt; code lines are kept verbatim.
 */
3757 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3759 MonoMethodHeaderSummary header;
3761 #ifdef MONO_ARCH_SOFT_FLOAT
3762 MonoMethodSignature *sig = mono_method_signature (method);
/* Shared generic code is never inlined. */
3766 if (cfg->generic_sharing_context)
3769 if (cfg->inline_depth > 10)
3772 #ifdef MONO_ARCH_HAVE_LMF_OPS
3773 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3774 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): 'signature' does not match the 'sig' declared above; this
 * presumably only compiles on targets defining MONO_ARCH_HAVE_LMF_OPS and
 * MONO_ARCH_SOFT_FLOAT together with a 'signature' local — verify. */
3775 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3780 if (!mono_method_get_header_summary (method, &header))
3783 /*runtime, icall and pinvoke are checked by summary call*/
3784 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3785 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3786 (method->klass->marshalbyref) ||
3790 /* also consider num_locals? */
3791 /* Do the size check early to avoid creating vtables */
/* Lazily read the inline size limit from the environment, once. */
3792 if (!inline_limit_inited) {
3793 if (getenv ("MONO_INLINELIMIT"))
3794 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3796 inline_limit = INLINE_LENGTH_LIMIT;
3797 inline_limit_inited = TRUE;
3799 if (header.code_size >= inline_limit)
3803 * if we can initialize the class of the method right away, we do,
3804 * otherwise we don't allow inlining if the class needs initialization,
3805 * since it would mean inserting a call to mono_runtime_class_init()
3806 * inside the inlined code
3808 if (!(cfg->opt & MONO_OPT_SHARED)) {
3809 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3810 if (cfg->run_cctors && method->klass->has_cctor) {
3811 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3812 if (!method->klass->runtime_info)
3813 /* No vtable created yet */
3815 vtable = mono_class_vtable (cfg->domain, method->klass);
3818 /* This makes so that inline cannot trigger */
3819 /* .cctors: too many apps depend on them */
3820 /* running with a specific order... */
3821 if (! vtable->initialized)
/* Eagerly run the cctor so the inlined body needs no class-init check. */
3823 mono_runtime_class_init (vtable);
3825 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3826 if (!method->klass->runtime_info)
3827 /* No vtable created yet */
3829 vtable = mono_class_vtable (cfg->domain, method->klass);
3832 if (!vtable->initialized)
3837 * If we're compiling for shared code
3838 * the cctor will need to be run at aot method load time, for example,
3839 * or at the end of the compilation of the inlining method.
3841 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3846 * CAS - do not inline methods with declarative security
3847 * Note: this has to be before any possible return TRUE;
3849 if (mono_method_has_declsec (method))
3852 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods using R4 values. */
3854 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3856 for (i = 0; i < sig->param_count; ++i)
3857 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3865 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3867 if (vtable->initialized && !cfg->compile_aot)
3870 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3873 if (!mono_class_needs_cctor_run (vtable->klass, method))
3876 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3877 /* The initialization is already done before the method is called */
3884 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3888 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3890 mono_class_init (klass);
3891 size = mono_class_array_element_size (klass);
3893 mult_reg = alloc_preg (cfg);
3894 array_reg = arr->dreg;
3895 index_reg = index->dreg;
3897 #if SIZEOF_REGISTER == 8
3898 /* The array reg is 64 bits but the index reg is only 32 */
3899 if (COMPILE_LLVM (cfg)) {
3901 index2_reg = index_reg;
3903 index2_reg = alloc_preg (cfg);
3904 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3907 if (index->type == STACK_I8) {
3908 index2_reg = alloc_preg (cfg);
3909 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3911 index2_reg = index_reg;
3916 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3918 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3919 if (size == 1 || size == 2 || size == 4 || size == 8) {
3920 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3922 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3923 ins->type = STACK_PTR;
3929 add_reg = alloc_preg (cfg);
3931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3932 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3933 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3934 ins->type = STACK_PTR;
3935 MONO_ADD_INS (cfg->cbb, ins);
3940 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the rank-2
 * array ARR with element class KLASS, adjusting each index by the
 * dimension's lower bound and range-checking both against its length.
 * Only compiled where the target has native multiply (guard above).
 * NOTE(review): the declarations of 'ins', 'size', 'index1' and 'index2'
 * are on lines elided from this excerpt.
 */
3942 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3944 int bounds_reg = alloc_preg (cfg);
3945 int add_reg = alloc_preg (cfg);
3946 int mult_reg = alloc_preg (cfg);
3947 int mult2_reg = alloc_preg (cfg);
3948 int low1_reg = alloc_preg (cfg);
3949 int low2_reg = alloc_preg (cfg);
3950 int high1_reg = alloc_preg (cfg);
3951 int high2_reg = alloc_preg (cfg);
3952 int realidx1_reg = alloc_preg (cfg);
3953 int realidx2_reg = alloc_preg (cfg);
3954 int sum_reg = alloc_preg (cfg);
3959 mono_class_init (klass);
3960 size = mono_class_array_element_size (klass);
3962 index1 = index_ins1->dreg;
3963 index2 = index_ins2->dreg;
3965 /* range checking */
3966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3967 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned-compare vs length. */
3969 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3970 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3971 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3972 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3973 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3974 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3975 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check, one MonoArrayBounds further into bounds[]. */
3977 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3978 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3979 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3980 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3981 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3982 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3983 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + vector offset. */
3985 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3986 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3987 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3988 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3989 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3991 ins->type = STACK_MP;
3993 MONO_ADD_INS (cfg->cbb, ins);
4000 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4004 MonoMethod *addr_method;
4007 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4010 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4012 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4013 /* emit_ldelema_2 depends on OP_LMUL */
4014 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4015 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4019 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4020 addr_method = mono_marshal_get_array_address (rank, element_size);
4021 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4026 static MonoBreakPolicy
4027 always_insert_breakpoint (MonoMethod *method)
4029 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; replaced via mono_set_break_policy (). */
4032 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4035 * mono_set_break_policy:
4036 * policy_callback: the new callback function
4038 * Allow embedders to decide whether to actually obey breakpoint instructions
4039 * (both break IL instructions and Debugger.Break () method calls), for example
4040 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4041 * untrusted or semi-trusted code.
4043 * @policy_callback will be called every time a break point instruction needs to
4044 * be inserted with the method argument being the method that calls Debugger.Break()
4045 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4046 * if it wants the breakpoint to not be effective in the given method.
4047 * #MONO_BREAK_POLICY_ALWAYS is the default.
4050 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4052 if (policy_callback)
4053 break_policy_func = policy_callback;
4055 break_policy_func = always_insert_breakpoint;
4059 should_insert_brekpoint (MonoMethod *method) {
4060 switch (break_policy_func (method)) {
4061 case MONO_BREAK_POLICY_ALWAYS:
4063 case MONO_BREAK_POLICY_NEVER:
4065 case MONO_BREAK_POLICY_ON_DBG:
4066 return mono_debug_using_mono_debugger ();
4068 g_warning ("Incorrect value returned from break policy callback");
4073 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4075 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4077 MonoInst *addr, *store, *load;
4078 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4080 /* the bounds check is already done by the callers */
4081 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4083 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4084 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4086 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4087 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4093 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4095 MonoInst *ins = NULL;
4096 #ifdef MONO_ARCH_SIMD_INTRINSICS
4097 if (cfg->opt & MONO_OPT_SIMD) {
4098 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (with signature FSIG and argument
 * instructions ARGS) with inline intrinsic IR. Returns the instruction
 * producing the call's result, or NULL when no intrinsic applies; the
 * final fallthrough defers to the architecture backend.
 * NOTE(review): many one-line bodies, #else/#endif lines and closing braces
 * fall on lines elided from this excerpt; code lines are kept verbatim.
 */
4108 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4110 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
4112 static MonoClass *runtime_helpers_class = NULL;
4113 if (! runtime_helpers_class)
4114 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4115 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4117 if (cmethod->klass == mono_defaults.string_class) {
4118 if (strcmp (cmethod->name, "get_Chars") == 0) {
4119 int dreg = alloc_ireg (cfg);
4120 int index_reg = alloc_preg (cfg);
4121 int mult_reg = alloc_preg (cfg);
4122 int add_reg = alloc_preg (cfg);
4124 #if SIZEOF_REGISTER == 8
4125 /* The array reg is 64 bits but the index reg is only 32 */
4126 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4128 index_reg = args [1]->dreg;
4130 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4132 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4133 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4134 add_reg = ins->dreg;
4135 /* Avoid a warning */
4137 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4141 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4143 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4145 type_from_op (ins, NULL, NULL);
4147 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4148 int dreg = alloc_ireg (cfg);
4149 /* Decompose later to allow more optimizations */
4150 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4151 ins->type = STACK_I4;
4152 ins->flags |= MONO_INST_FAULT;
4153 cfg->cbb->has_array_access = TRUE;
4154 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4157 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4158 int mult_reg = alloc_preg (cfg);
4159 int add_reg = alloc_preg (cfg);
4161 /* The corlib functions check for oob already. */
4162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4163 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4164 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4165 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4168 } else if (cmethod->klass == mono_defaults.object_class) {
4170 if (strcmp (cmethod->name, "GetType") == 0) {
4171 int dreg = alloc_preg (cfg);
4172 int vt_reg = alloc_preg (cfg);
4173 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4174 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4175 type_from_op (ins, NULL, NULL);
4178 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash the object address; only valid with a non-moving GC. */
4179 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4180 int dreg = alloc_ireg (cfg);
4181 int t1 = alloc_ireg (cfg);
4183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4184 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4185 ins->type = STACK_I4;
4189 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4190 MONO_INST_NEW (cfg, ins, OP_NOP);
4191 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4195 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4196 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4197 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4199 #ifndef MONO_BIG_ARRAYS
4201 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4204 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4205 int dreg = alloc_ireg (cfg);
4206 int bounds_reg = alloc_ireg (cfg);
4207 MonoBasicBlock *end_bb, *szarray_bb;
4208 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4210 NEW_BBLOCK (cfg, end_bb);
4211 NEW_BBLOCK (cfg, szarray_bb);
4213 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4214 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4217 /* Non-szarray case */
4219 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4220 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4222 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4223 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* szarray: length is max_length; lower bound is always 0. */
4225 MONO_START_BB (cfg, szarray_bb);
4228 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4229 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4231 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4232 MONO_START_BB (cfg, end_bb);
4234 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4235 ins->type = STACK_I4;
/* Fast reject: the remaining Array intrinsics all start with 'g'. */
4241 if (cmethod->name [0] != 'g')
4244 if (strcmp (cmethod->name, "get_Rank") == 0) {
4245 int dreg = alloc_ireg (cfg);
4246 int vtable_reg = alloc_preg (cfg);
4247 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4248 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4249 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4250 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4251 type_from_op (ins, NULL, NULL);
4254 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4255 int dreg = alloc_ireg (cfg);
4257 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4258 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4259 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4264 } else if (cmethod->klass == runtime_helpers_class) {
4266 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4267 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4271 } else if (cmethod->klass == mono_defaults.thread_class) {
4272 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4273 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4274 MONO_ADD_INS (cfg->cbb, ins);
4276 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4277 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4278 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths --- */
4281 } else if (cmethod->klass == mono_defaults.monitor_class) {
4282 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4283 /* The trampolines don't work under SGEN */
4284 gboolean is_moving_gc = mono_gc_is_moving ();
4286 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4289 if (COMPILE_LLVM (cfg)) {
4291 * Pass the argument normally, the LLVM backend will handle the
4292 * calling convention problems.
4294 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4296 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4297 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
/* The trampoline takes the object in a fixed register, not on the stack. */
4298 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4299 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4302 return (MonoInst*)call;
4303 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4306 if (COMPILE_LLVM (cfg)) {
4307 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4309 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4310 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4311 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4312 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4315 return (MonoInst*)call;
4317 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4318 MonoMethod *fast_method = NULL;
4320 /* Avoid infinite recursion */
4321 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4322 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4323 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4326 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4327 strcmp (cmethod->name, "Exit") == 0)
4328 fast_method = mono_monitor_get_fast_path (cmethod);
4332 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4334 } else if (cmethod->klass->image == mono_defaults.corlib &&
4335 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4336 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4339 #if SIZEOF_REGISTER == 8
4340 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4341 /* 64 bit reads are already atomic */
4342 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4343 ins->dreg = mono_alloc_preg (cfg);
4344 ins->inst_basereg = args [0]->dreg;
4345 ins->inst_offset = 0;
4346 MONO_ADD_INS (cfg->cbb, ins);
4350 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic add of +1/-1. */
4351 if (strcmp (cmethod->name, "Increment") == 0) {
4352 MonoInst *ins_iconst;
4355 if (fsig->params [0]->type == MONO_TYPE_I4)
4356 opcode = OP_ATOMIC_ADD_NEW_I4;
4357 #if SIZEOF_REGISTER == 8
4358 else if (fsig->params [0]->type == MONO_TYPE_I8)
4359 opcode = OP_ATOMIC_ADD_NEW_I8;
4362 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4363 ins_iconst->inst_c0 = 1;
4364 ins_iconst->dreg = mono_alloc_ireg (cfg);
4365 MONO_ADD_INS (cfg->cbb, ins_iconst);
4367 MONO_INST_NEW (cfg, ins, opcode);
4368 ins->dreg = mono_alloc_ireg (cfg);
4369 ins->inst_basereg = args [0]->dreg;
4370 ins->inst_offset = 0;
4371 ins->sreg2 = ins_iconst->dreg;
4372 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4373 MONO_ADD_INS (cfg->cbb, ins);
4375 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4376 MonoInst *ins_iconst;
4379 if (fsig->params [0]->type == MONO_TYPE_I4)
4380 opcode = OP_ATOMIC_ADD_NEW_I4;
4381 #if SIZEOF_REGISTER == 8
4382 else if (fsig->params [0]->type == MONO_TYPE_I8)
4383 opcode = OP_ATOMIC_ADD_NEW_I8;
4386 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4387 ins_iconst->inst_c0 = -1;
4388 ins_iconst->dreg = mono_alloc_ireg (cfg);
4389 MONO_ADD_INS (cfg->cbb, ins_iconst);
4391 MONO_INST_NEW (cfg, ins, opcode);
4392 ins->dreg = mono_alloc_ireg (cfg);
4393 ins->inst_basereg = args [0]->dreg;
4394 ins->inst_offset = 0;
4395 ins->sreg2 = ins_iconst->dreg;
4396 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4397 MONO_ADD_INS (cfg->cbb, ins);
4399 } else if (strcmp (cmethod->name, "Add") == 0) {
4402 if (fsig->params [0]->type == MONO_TYPE_I4)
4403 opcode = OP_ATOMIC_ADD_NEW_I4;
4404 #if SIZEOF_REGISTER == 8
4405 else if (fsig->params [0]->type == MONO_TYPE_I8)
4406 opcode = OP_ATOMIC_ADD_NEW_I8;
4410 MONO_INST_NEW (cfg, ins, opcode);
4411 ins->dreg = mono_alloc_ireg (cfg);
4412 ins->inst_basereg = args [0]->dreg;
4413 ins->inst_offset = 0;
4414 ins->sreg2 = args [1]->dreg;
4415 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4416 MONO_ADD_INS (cfg->cbb, ins);
4419 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4421 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4422 if (strcmp (cmethod->name, "Exchange") == 0) {
4424 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4426 if (fsig->params [0]->type == MONO_TYPE_I4)
4427 opcode = OP_ATOMIC_EXCHANGE_I4;
4428 #if SIZEOF_REGISTER == 8
4429 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4430 (fsig->params [0]->type == MONO_TYPE_I))
4431 opcode = OP_ATOMIC_EXCHANGE_I8;
4433 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4434 opcode = OP_ATOMIC_EXCHANGE_I4;
4439 MONO_INST_NEW (cfg, ins, opcode);
4440 ins->dreg = mono_alloc_ireg (cfg);
4441 ins->inst_basereg = args [0]->dreg;
4442 ins->inst_offset = 0;
4443 ins->sreg2 = args [1]->dreg;
4444 MONO_ADD_INS (cfg->cbb, ins);
4446 switch (fsig->params [0]->type) {
4448 ins->type = STACK_I4;
4452 ins->type = STACK_I8;
4454 case MONO_TYPE_OBJECT:
4455 ins->type = STACK_OBJ;
4458 g_assert_not_reached ();
/* Storing a reference needs a write barrier under generational GC. */
4461 if (cfg->gen_write_barriers && is_ref)
4462 emit_write_barrier (cfg, args [0], args [1], -1);
4464 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4466 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4467 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4469 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4470 if (fsig->params [1]->type == MONO_TYPE_I4)
4472 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4473 size = sizeof (gpointer);
4474 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4477 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4478 ins->dreg = alloc_ireg (cfg);
4479 ins->sreg1 = args [0]->dreg;
4480 ins->sreg2 = args [1]->dreg;
4481 ins->sreg3 = args [2]->dreg;
4482 ins->type = STACK_I4;
4483 MONO_ADD_INS (cfg->cbb, ins);
4484 } else if (size == 8) {
4485 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4486 ins->dreg = alloc_ireg (cfg);
4487 ins->sreg1 = args [0]->dreg;
4488 ins->sreg2 = args [1]->dreg;
4489 ins->sreg3 = args [2]->dreg;
4490 ins->type = STACK_I8;
4491 MONO_ADD_INS (cfg->cbb, ins);
4493 /* g_assert_not_reached (); */
4495 if (cfg->gen_write_barriers && is_ref)
4496 emit_write_barrier (cfg, args [0], args [1], -1);
4498 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- misc corlib intrinsics (Debugger.Break, Environment) --- */
4502 } else if (cmethod->klass->image == mono_defaults.corlib) {
4503 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4504 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4505 if (should_insert_brekpoint (cfg->method))
4506 MONO_INST_NEW (cfg, ins, OP_BREAK);
4508 MONO_INST_NEW (cfg, ins, OP_NOP);
4509 MONO_ADD_INS (cfg->cbb, ins);
4512 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4513 && strcmp (cmethod->klass->name, "Environment") == 0) {
4515 EMIT_NEW_ICONST (cfg, ins, 1);
4517 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math --- */
4521 } else if (cmethod->klass == mono_defaults.math_class) {
4523 * There is general branches code for Min/Max, but it does not work for
4525 * http://everything2.com/?node_id=1051618
/* --- SIMD, then the architecture-specific fallback --- */
4529 #ifdef MONO_ARCH_SIMD_INTRINSICS
4530 if (cfg->opt & MONO_OPT_SIMD) {
4531 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4537 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4541 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to certain well-known runtime methods to specialized
 * JIT sequences.  The visible case replaces String.InternalAllocateStr with
 * a direct call to the GC's managed allocator, but only when allocation
 * profiling is off and the code is not domain-shared (MONO_OPT_SHARED).
 * Returns the emitted call instruction, or NULL-equivalent paths elsewhere
 * when no redirection applies (NOTE(review): tail of function not visible
 * here — confirm the fallback return).
 */
4544 inline static MonoInst*
4545 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4546 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4548 if (method->klass == mono_defaults.string_class) {
4549 /* managed string allocation support */
4550 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4551 MonoInst *iargs [2];
4552 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4553 MonoMethod *managed_alloc = NULL;
4555 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilers can't call mono_gc_get_managed_allocator () for the target GC. */
4556 #ifndef MONO_CROSS_COMPILE
4557 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
4561 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4562 iargs [1] = args [0];
4563 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the values on the stack SP into newly created local variables and
 * register those locals as cfg->args [], so the inlined method's argument
 * loads see them.  For the implicit 'this' argument (i == 0 with hasthis)
 * the type is derived from the stack slot instead of the signature.
 */
4570 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4572 MonoInst *store, *temp;
4575 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4576 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4579 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4580 * would be different than the MonoInst's used to represent arguments, and
4581 * the ldelema implementation can't deal with that.
4582 * Solution: When ldelema is used on an inline argument, create a var for
4583 * it, emit ldelema on that var, and emit the saving code below in
4584 * inline_method () if needed.
4586 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4587 cfg->args [i] = temp;
4588 /* This uses cfg->args [i] which is set by the preceding line */
4589 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4590 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids for bisecting inliner problems: when enabled, the
 * environment variables below restrict which methods may be inlined
 * (callee side and caller side respectively) by full-name prefix match.
 */
4595 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4596 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4598 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD may be inlined, i.e. if its full name starts
 * with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The limit
 * is read once and cached; an empty/unset limit allows everything.
 */
4600 check_inline_called_method_name_limit (MonoMethod *called_method)
4603 static char *limit = NULL;
4605 if (limit == NULL) {
4606 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4608 if (limit_string != NULL)
4609 limit = limit_string;
/* No variable set: use "" so the check below is a no-op. */
4611 limit = (char *) "";
4614 if (limit [0] != '\0') {
4615 char *called_method_name = mono_method_full_name (called_method, TRUE);
4617 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4618 g_free (called_method_name);
4620 //return (strncmp_result <= 0);
4621 return (strncmp_result == 0);
4628 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Caller-side counterpart of check_inline_called_method_name_limit ():
 * return TRUE if CALLER_METHOD's full name starts with the prefix given in
 * $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Cached on first use.
 */
4630 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4633 static char *limit = NULL;
4635 if (limit == NULL) {
4636 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4637 if (limit_string != NULL) {
4638 limit = limit_string;
/* No variable set: use "" so the check below is a no-op. */
4640 limit = (char *) "";
4644 if (limit [0] != '\0') {
4645 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4647 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4648 g_free (caller_method_name);
4650 //return (strncmp_result <= 0);
4651 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  The caller's
 * per-method compilation state in CFG (locals, args, cil offsets, cbb, etc.)
 * is saved, swapped for the callee's, and restored after the recursive
 * mono_method_to_ir () call.  Returns nonzero (the cost) on success, and on
 * failure rolls the basic-block state back so the caller can emit a normal
 * call instead.  INLINE_ALLWAYS forces inlining regardless of cost.
 */
4659 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4660 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4662 MonoInst *ins, *rvar = NULL;
4663 MonoMethodHeader *cheader;
4664 MonoBasicBlock *ebblock, *sbblock;
4666 MonoMethod *prev_inlined_method;
4667 MonoInst **prev_locals, **prev_args;
4668 MonoType **prev_arg_types;
4669 guint prev_real_offset;
4670 GHashTable *prev_cbb_hash;
4671 MonoBasicBlock **prev_cil_offset_to_bb;
4672 MonoBasicBlock *prev_cbb;
4673 unsigned char* prev_cil_start;
4674 guint32 prev_cil_offset_to_bb_len;
4675 MonoMethod *prev_current_method;
4676 MonoGenericContext *prev_generic_context;
4677 gboolean ret_var_set, prev_ret_var_set;
4679 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filters for debugging the inliner, see above. */
4681 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4682 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4685 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4686 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4690 if (cfg->verbose_level > 2)
4691 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4693 if (!cmethod->inline_info) {
4694 mono_jit_stats.inlineable_methods++;
4695 cmethod->inline_info = 1;
4698 /* allocate local variables */
4699 cheader = mono_method_get_header (cmethod);
4701 if (cheader == NULL || mono_loader_get_last_error ()) {
/* Header load failed: free whatever was returned and clear the error. */
4703 mono_metadata_free_mh (cheader);
4704 mono_loader_clear_error ();
4708 /* allocate space to store the return value */
4709 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4710 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4714 prev_locals = cfg->locals;
4715 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4716 for (i = 0; i < cheader->num_locals; ++i)
4717 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4719 /* allocate start and end blocks */
4720 /* This is needed so if the inline is aborted, we can clean up */
4721 NEW_BBLOCK (cfg, sbblock);
4722 sbblock->real_offset = real_offset;
4724 NEW_BBLOCK (cfg, ebblock);
4725 ebblock->block_num = cfg->num_bblocks++;
4726 ebblock->real_offset = real_offset;
/* Save the caller's per-method state before recursing into the callee. */
4728 prev_args = cfg->args;
4729 prev_arg_types = cfg->arg_types;
4730 prev_inlined_method = cfg->inlined_method;
4731 cfg->inlined_method = cmethod;
4732 cfg->ret_var_set = FALSE;
4733 cfg->inline_depth ++;
4734 prev_real_offset = cfg->real_offset;
4735 prev_cbb_hash = cfg->cbb_hash;
4736 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4737 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4738 prev_cil_start = cfg->cil_start;
4739 prev_cbb = cfg->cbb;
4740 prev_current_method = cfg->current_method;
4741 prev_generic_context = cfg->generic_context;
4742 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee's IL; costs < 0 means it aborted. */
4744 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4746 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state in the reverse of the save above. */
4748 cfg->inlined_method = prev_inlined_method;
4749 cfg->real_offset = prev_real_offset;
4750 cfg->cbb_hash = prev_cbb_hash;
4751 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4752 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4753 cfg->cil_start = prev_cil_start;
4754 cfg->locals = prev_locals;
4755 cfg->args = prev_args;
4756 cfg->arg_types = prev_arg_types;
4757 cfg->current_method = prev_current_method;
4758 cfg->generic_context = prev_generic_context;
4759 cfg->ret_var_set = prev_ret_var_set;
4760 cfg->inline_depth --;
/* Accept the inline if it was cheap enough (or forced). */
4762 if ((costs >= 0 && costs < 60) || inline_allways) {
4763 if (cfg->verbose_level > 2)
4764 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4766 mono_jit_stats.inlined_methods++;
4768 /* always add some code to avoid block split failures */
4769 MONO_INST_NEW (cfg, ins, OP_NOP);
4770 MONO_ADD_INS (prev_cbb, ins);
4772 prev_cbb->next_bb = sbblock;
4773 link_bblock (cfg, prev_cbb, sbblock);
4776 * Get rid of the begin and end bblocks if possible to aid local
4779 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4781 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4782 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4784 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4785 MonoBasicBlock *prev = ebblock->in_bb [0];
4786 mono_merge_basic_blocks (cfg, prev, ebblock);
4788 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4789 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4790 cfg->cbb = prev_cbb;
4798 * If the inlined method contains only a throw, then the ret var is not
4799 * set, so set it to a dummy value.
4802 static double r8_0 = 0.0;
4804 switch (rvar->type) {
4806 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4809 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4814 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4817 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4818 ins->type = STACK_R8;
4819 ins->inst_p0 = (void*)&r8_0;
4820 ins->dreg = rvar->dreg;
4821 MONO_ADD_INS (cfg->cbb, ins);
4824 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4827 g_assert_not_reached ();
/* Push the return value for the caller. */
4831 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4834 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: undo exception state and the newly created bblocks. */
4837 if (cfg->verbose_level > 2)
4838 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4839 cfg->exception_type = MONO_EXCEPTION_NONE;
4840 mono_loader_clear_error ();
4842 /* This gets rid of the newly added bblocks */
4843 cfg->cbb = prev_cbb;
4845 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4850 * Some of these comments may well be out-of-date.
4851 * Design decisions: we do a single pass over the IL code (and we do bblock
4852 * splitting/merging in the few cases when it's required: a back jump to an IL
4853 * address that was not already seen as bblock starting point).
4854 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4855 * Complex operations are decomposed into simpler ones right away. We need to let the
4856 * arch-specific code peek and poke inside this process somehow (except when the
4857 * optimizations can take advantage of the full semantic info of coarse opcodes).
4858 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4859 * MonoInst->opcode initially is the IL opcode or some simplification of that
4860 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4861 * opcode with value bigger than OP_LAST.
4862 * At this point the IR can be handed over to an interpreter, a dumb code generator
4863 * or to the optimizing code generator that will translate it to SSA form.
4865 * Profiling directed optimizations.
4866 * We may compile by default with few or no optimizations and instrument the code
4867 * or the user may indicate what methods to optimize the most either in a config file
4868 * or through repeated runs where the compiler applies offline the optimizations to
4869 * each method and then decides if it was worth it.
/*
 * Validation helpers used by mono_method_to_ir () below.  Each macro jumps
 * to the shared error handling (UNVERIFIED / load_error) when its check
 * fails; they rely on locals (sp, stack_start, header, num_args, ip, end)
 * defined inside that function.
 */
4872 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4873 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4874 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4875 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4876 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4877 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4878 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4879 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4881 /* offset from br.s -> br like opcodes */
4882 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL offset IP belongs to basic block BB, i.e. the
 * offset either maps to no recorded block start (so it is inside the current
 * one) or maps to BB itself.
 */
4885 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4887 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4889 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): walk every instruction, and for
 * each branch/switch create basic blocks (via GET_BBLOCK) at the branch
 * target and at the instruction following the branch.  Also marks the block
 * containing a 'throw' as out-of-line so later passes can deprioritize it.
 * On an invalid opcode, *POS reports where decoding stopped (elided here).
 */
4893 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4895 unsigned char *ip = start;
4896 unsigned char *target;
4899 MonoBasicBlock *bblock;
4900 const MonoOpcode *opcode;
4903 cli_addr = ip - start;
4904 i = mono_opcode_value ((const guint8 **)&ip, end);
4907 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; only branch operands create blocks. */
4908 switch (opcode->argument) {
4909 case MonoInlineNone:
4912 case MonoInlineString:
4913 case MonoInlineType:
4914 case MonoInlineField:
4915 case MonoInlineMethod:
4918 case MonoShortInlineR:
4925 case MonoShortInlineVar:
4926 case MonoShortInlineI:
4929 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the following instruction. */
4930 target = start + cli_addr + 2 + (signed char)ip [1];
4931 GET_BBLOCK (cfg, bblock, target);
4934 GET_BBLOCK (cfg, bblock, ip);
4936 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the following instruction. */
4937 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4938 GET_BBLOCK (cfg, bblock, target);
4941 GET_BBLOCK (cfg, bblock, ip);
4943 case MonoInlineSwitch: {
4944 guint32 n = read32 (ip + 1);
/* The switch table holds n 4-byte offsets after the 5-byte opcode. */
4947 cli_addr += 5 + 4 * n;
4948 target = start + cli_addr;
4949 GET_BBLOCK (cfg, bblock, target);
4951 for (j = 0; j < n; ++j) {
4952 target = start + cli_addr + (gint32)read32 (ip);
4953 GET_BBLOCK (cfg, bblock, target);
4963 g_assert_not_reached ();
4966 if (i == CEE_THROW) {
4967 unsigned char *bb_start = ip - 1;
4969 /* Find the start of the bblock containing the throw */
4971 while ((bb_start >= start) && !bblock) {
4972 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4976 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data table instead of metadata.
 * "allow_open" because, unlike mini_get_method (), the result may be an
 * open constructed method.
 */
4985 static inline MonoMethod *
4986 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4990 if (m->wrapper_type != MONO_WRAPPER_NONE)
4991 return mono_method_get_wrapper_data (m, token);
4993 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, reject methods whose class is still an open constructed type
 * (handling elided here — presumably returns NULL; confirm against the
 * full source).
 */
4998 static inline MonoMethod *
4999 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5001 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5003 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, reading from the
 * wrapper data table for wrapper methods, and initialize the class before
 * returning it.
 */
5009 static inline MonoClass*
5010 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5014 if (method->wrapper_type != MONO_WRAPPER_NONE)
5015 klass = mono_method_get_wrapper_data (method, token);
5017 klass = mono_class_get_full (method->klass->image, token, context);
5019 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 * For ECMA LinkDemands it emits code that throws a SecurityException at the
 * call site; for other failures it records a SECURITY_LINKDEMAND exception
 * on the cfg (without overwriting an earlier exception).
 */
5028 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-evaluate declarative security when inlining into another method. */
5032 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5036 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5037 if (result == MONO_JIT_SECURITY_OK)
5040 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5041 /* Generate code to throw a SecurityException before the actual call/link */
5042 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5045 NEW_ICONST (cfg, args [0], 4);
5046 NEW_METHODCONST (cfg, args [1], caller);
5047 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5048 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5049 /* don't hide previous results */
5050 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5051 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (looking it up once and caching it) the managed
 * SecurityManager.ThrowException(exception) method used to raise
 * pre-constructed exceptions from JITted code.
 */
5059 throw_exception (void)
5061 static MonoMethod *method = NULL;
5064 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5065 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call that throws the pre-constructed exception object EX at
 * runtime, via the managed ThrowException helper above.
 */
5072 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5074 MonoMethod *thrower = throw_exception ();
5077 EMIT_NEW_PCONST (cfg, args [0], ex);
5078 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
5086 get_original_method (MonoMethod *method)
5088 if (method->wrapper_type == MONO_WRAPPER_NONE)
5091 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5092 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5095 /* in other cases we need to find the original method */
5096 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code throwing the corresponding exception.
 */
5100 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5101 MonoBasicBlock *bblock, unsigned char *ip)
5103 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5104 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5106 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code throwing the corresponding exception.
 */
5110 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5111 MonoBasicBlock *bblock, unsigned char *ip)
5113 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5114 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5116 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * Returns NULL if the pattern does not match or the element type cannot be
 * handled (byte order / ARM FP format issues).  On success *OUT_SIZE and
 * *OUT_FIELD_TOKEN describe the RVA-backed field holding the data.
 */
5124 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/*
 * The expected IL shape following the newarr:
 * newarr[System.Int32]
 * dup
 * ldtoken field valuetype ...
 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 */
5132 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5133 guint32 token = read32 (ip + 7);
5134 guint32 field_token = read32 (ip + 2);
5135 guint32 field_index = field_token & 0xffffff;
5137 const char *data_ptr;
5139 MonoMethod *cmethod;
5140 MonoClass *dummy_class;
5141 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5147 *out_field_token = field_token;
5149 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real RuntimeHelpers.InitializeArray from corlib. */
5152 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5154 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5155 case MONO_TYPE_BOOLEAN:
5159 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5160 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5161 case MONO_TYPE_CHAR:
5171 return NULL; /* stupid ARM FP swapped format */
/* The field must be at least as large as the array data we will copy. */
5181 if (size > mono_type_size (field->type, &dummy_align))
5184 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5185 if (!method->klass->image->dynamic) {
5186 field_index = read32 (ip + 2) & 0xffffff;
5187 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5188 data_ptr = mono_image_rva_map (method->klass->image, rva);
5189 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5190 /* for aot code we do the lookup on load */
5191 if (aot && data_ptr)
5192 return GUINT_TO_POINTER (rva);
5194 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Reflection.Emit image: read the data directly from the field. */
5196 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP, disassembling the offending instruction when the body is non-empty.
 * The header is queued on cfg->headers_to_free for later release.
 */
5204 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5206 char *method_fname = mono_method_full_name (method, TRUE);
5208 MonoMethodHeader *header = mono_method_get_header (method);
5210 if (header->code_size == 0)
5211 method_code = g_strdup ("method body is empty.");
5213 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5214 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5215 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5216 g_free (method_fname);
5217 g_free (method_code);
5218 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-constructed managed exception on CFG, registering
 * cfg->exception_ptr as a GC root so the object stays alive.
 */
5222 set_exception_object (MonoCompile *cfg, MonoException *exception)
5224 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5225 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5226 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables
 * through the generic sharing context when compiling shared code.
 */
5230 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5234 if (cfg->generic_sharing_context)
5235 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5237 type = &klass->byval_arg;
5238 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N.  When the value on top of the stack
 * is a freshly emitted constant and the store would be a plain reg-reg move,
 * retarget the constant's dreg directly to the local instead of emitting a
 * separate store.
 */
5242 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5245 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5246 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5247 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5248 /* Optimize reg-reg moves away */
5250 * Can't optimize other opcodes, since sp[0] might point to
5251 * the last ins of a decomposed opcode.
5253 sp [0]->dreg = (cfg)->locals [n]->dreg;
5255 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.  Visible here: ldloca immediately followed by initobj is turned
 * into a direct zeroing of the local (NULL store for reference types, VZERO
 * for value types), letting the caller skip both instructions.  Returns the
 * advanced ip on success (return paths elided from this view).
 */
5263 static inline unsigned char *
5264 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5273 local = read16 (ip + 2);
/* ldloca <local>; initobj <token> => zero the local in place. */
5277 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5278 gboolean skip = FALSE;
5280 /* From the INITOBJ case */
5281 token = read32 (ip + 2);
5282 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5283 CHECK_TYPELOAD (klass);
5284 if (generic_class_is_reference_type (cfg, klass)) {
5285 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5286 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5287 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5288 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5289 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return TRUE if CLASS is System.Exception or derives from it, walking
 * up the parent chain.
 */
5302 is_exception_class (MonoClass *class)
5305 if (class == mono_defaults.exception_class)
5307 class = class->parent;
5313 * mono_method_to_ir:
5315 * Translate the .net IL into linear IR.
5318 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5319 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5320 guint inline_offset, gboolean is_virtual_call)
5323 MonoInst *ins, **sp, **stack_start;
5324 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5325 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5326 MonoMethod *cmethod, *method_definition;
5327 MonoInst **arg_array;
5328 MonoMethodHeader *header;
5330 guint32 token, ins_flag;
5332 MonoClass *constrained_call = NULL;
5333 unsigned char *ip, *end, *target, *err_pos;
5334 static double r8_0 = 0.0;
5335 MonoMethodSignature *sig;
5336 MonoGenericContext *generic_context = NULL;
5337 MonoGenericContainer *generic_container = NULL;
5338 MonoType **param_types;
5339 int i, n, start_new_bblock, dreg;
5340 int num_calls = 0, inline_costs = 0;
5341 int breakpoint_id = 0;
5343 MonoBoolean security, pinvoke;
5344 MonoSecurityManager* secman = NULL;
5345 MonoDeclSecurityActions actions;
5346 GSList *class_inits = NULL;
5347 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5349 gboolean init_locals, seq_points, skip_dead_blocks;
5351 /* serialization and xdomain stuff may need access to private fields and methods */
5352 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5353 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5354 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5355 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5356 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5357 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5359 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5361 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5362 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5363 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5364 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5366 image = method->klass->image;
5367 header = mono_method_get_header (method);
5369 MonoLoaderError *error;
5371 if ((error = mono_loader_get_last_error ())) {
5372 cfg->exception_type = error->exception_type;
5374 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5375 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5377 goto exception_exit;
5379 generic_container = mono_method_get_generic_container (method);
5380 sig = mono_method_signature (method);
5381 num_args = sig->hasthis + sig->param_count;
5382 ip = (unsigned char*)header->code;
5383 cfg->cil_start = ip;
5384 end = ip + header->code_size;
5385 mono_jit_stats.cil_code_size += header->code_size;
5386 init_locals = header->init_locals;
5388 seq_points = cfg->gen_seq_points && cfg->method == method;
5391 * Methods without init_locals set could cause asserts in various passes
5396 method_definition = method;
5397 while (method_definition->is_inflated) {
5398 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5399 method_definition = imethod->declaring;
5402 /* SkipVerification is not allowed if core-clr is enabled */
5403 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5405 dont_verify_stloc = TRUE;
5408 if (!dont_verify && mini_method_verify (cfg, method_definition))
5409 goto exception_exit;
5411 if (mono_debug_using_mono_debugger ())
5412 cfg->keep_cil_nops = TRUE;
5414 if (sig->is_inflated)
5415 generic_context = mono_method_get_context (method);
5416 else if (generic_container)
5417 generic_context = &generic_container->context;
5418 cfg->generic_context = generic_context;
5420 if (!cfg->generic_sharing_context)
5421 g_assert (!sig->has_type_parameters);
5423 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5424 g_assert (method->is_inflated);
5425 g_assert (mono_method_get_context (method)->method_inst);
5427 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5428 g_assert (sig->generic_param_count);
5430 if (cfg->method == method) {
5431 cfg->real_offset = 0;
5433 cfg->real_offset = inline_offset;
5436 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5437 cfg->cil_offset_to_bb_len = header->code_size;
5439 cfg->current_method = method;
5441 if (cfg->verbose_level > 2)
5442 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5444 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5446 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5447 for (n = 0; n < sig->param_count; ++n)
5448 param_types [n + sig->hasthis] = sig->params [n];
5449 cfg->arg_types = param_types;
5451 dont_inline = g_list_prepend (dont_inline, method);
5452 if (cfg->method == method) {
5454 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5455 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5458 NEW_BBLOCK (cfg, start_bblock);
5459 cfg->bb_entry = start_bblock;
5460 start_bblock->cil_code = NULL;
5461 start_bblock->cil_length = 0;
5464 NEW_BBLOCK (cfg, end_bblock);
5465 cfg->bb_exit = end_bblock;
5466 end_bblock->cil_code = NULL;
5467 end_bblock->cil_length = 0;
5468 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5469 g_assert (cfg->num_bblocks == 2);
5471 arg_array = cfg->args;
5473 if (header->num_clauses) {
5474 cfg->spvars = g_hash_table_new (NULL, NULL);
5475 cfg->exvars = g_hash_table_new (NULL, NULL);
5477 /* handle exception clauses */
5478 for (i = 0; i < header->num_clauses; ++i) {
5479 MonoBasicBlock *try_bb;
5480 MonoExceptionClause *clause = &header->clauses [i];
5481 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5482 try_bb->real_offset = clause->try_offset;
5483 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5484 tblock->real_offset = clause->handler_offset;
5485 tblock->flags |= BB_EXCEPTION_HANDLER;
5487 link_bblock (cfg, try_bb, tblock);
5489 if (*(ip + clause->handler_offset) == CEE_POP)
5490 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5492 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5493 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5494 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5495 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5496 MONO_ADD_INS (tblock, ins);
5498 /* todo: is a fault block unsafe to optimize? */
5499 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5500 tblock->flags |= BB_EXCEPTION_UNSAFE;
5504 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5506 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5508 /* catch and filter blocks get the exception object on the stack */
5509 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5510 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5511 MonoInst *dummy_use;
5513 /* mostly like handle_stack_args (), but just sets the input args */
5514 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5515 tblock->in_scount = 1;
5516 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5517 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5520 * Add a dummy use for the exvar so its liveness info will be
5524 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5526 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5527 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5528 tblock->flags |= BB_EXCEPTION_HANDLER;
5529 tblock->real_offset = clause->data.filter_offset;
5530 tblock->in_scount = 1;
5531 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5532 /* The filter block shares the exvar with the handler block */
5533 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5534 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5535 MONO_ADD_INS (tblock, ins);
5539 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5540 clause->data.catch_class &&
5541 cfg->generic_sharing_context &&
5542 mono_class_check_context_used (clause->data.catch_class)) {
5544 * In shared generic code with catch
5545 * clauses containing type variables
5546 * the exception handling code has to
5547 * be able to get to the rgctx.
5548 * Therefore we have to make sure that
5549 * the vtable/mrgctx argument (for
5550 * static or generic methods) or the
5551 * "this" argument (for non-static
5552 * methods) are live.
5554 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5555 mini_method_get_context (method)->method_inst ||
5556 method->klass->valuetype) {
5557 mono_get_vtable_var (cfg);
5559 MonoInst *dummy_use;
5561 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5566 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5567 cfg->cbb = start_bblock;
5568 cfg->args = arg_array;
5569 mono_save_args (cfg, sig, inline_args);
5572 /* FIRST CODE BLOCK */
5573 NEW_BBLOCK (cfg, bblock);
5574 bblock->cil_code = ip;
5578 ADD_BBLOCK (cfg, bblock);
5580 if (cfg->method == method) {
5581 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5582 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5583 MONO_INST_NEW (cfg, ins, OP_BREAK);
5584 MONO_ADD_INS (bblock, ins);
5588 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5589 secman = mono_security_manager_get_methods ();
5591 security = (secman && mono_method_has_declsec (method));
5592 /* at this point having security doesn't mean we have any code to generate */
5593 if (security && (cfg->method == method)) {
5594 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5595 * And we do not want to enter the next section (with allocation) if we
5596 * have nothing to generate */
5597 security = mono_declsec_get_demands (method, &actions);
5600 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5601 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5603 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5604 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5605 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5607 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5608 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5612 mono_custom_attrs_free (custom);
5615 custom = mono_custom_attrs_from_class (wrapped->klass);
5616 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5620 mono_custom_attrs_free (custom);
5623 /* not a P/Invoke after all */
5628 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5629 /* we use a separate basic block for the initialization code */
5630 NEW_BBLOCK (cfg, init_localsbb);
5631 cfg->bb_init = init_localsbb;
5632 init_localsbb->real_offset = cfg->real_offset;
5633 start_bblock->next_bb = init_localsbb;
5634 init_localsbb->next_bb = bblock;
5635 link_bblock (cfg, start_bblock, init_localsbb);
5636 link_bblock (cfg, init_localsbb, bblock);
5638 cfg->cbb = init_localsbb;
5640 start_bblock->next_bb = bblock;
5641 link_bblock (cfg, start_bblock, bblock);
5644 /* at this point we know, if security is TRUE, that some code needs to be generated */
5645 if (security && (cfg->method == method)) {
5648 mono_jit_stats.cas_demand_generation++;
5650 if (actions.demand.blob) {
5651 /* Add code for SecurityAction.Demand */
5652 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5653 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5654 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5655 mono_emit_method_call (cfg, secman->demand, args, NULL);
5657 if (actions.noncasdemand.blob) {
5658 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5659 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5660 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5661 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5662 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5663 mono_emit_method_call (cfg, secman->demand, args, NULL);
5665 if (actions.demandchoice.blob) {
5666 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5667 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5668 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5669 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5670 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5674 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5676 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5679 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5680 /* check if this is native code, e.g. an icall or a p/invoke */
5681 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5682 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5684 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5685 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5687 /* if this is a native call then it can only be JITted from platform code */
5688 if ((icall || pinvk) && method->klass && method->klass->image) {
5689 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5690 MonoException *ex = icall ? mono_get_exception_security () :
5691 mono_get_exception_method_access ();
5692 emit_throw_exception (cfg, ex);
5699 if (header->code_size == 0)
5702 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5707 if (cfg->method == method)
5708 mono_debug_init_method (cfg, bblock, breakpoint_id);
5710 for (n = 0; n < header->num_locals; ++n) {
5711 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5716 /* We force the vtable variable here for all shared methods
5717 for the possibility that they might show up in a stack
5718 trace where their exact instantiation is needed. */
5719 if (cfg->generic_sharing_context && method == cfg->method) {
5720 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5721 mini_method_get_context (method)->method_inst ||
5722 method->klass->valuetype) {
5723 mono_get_vtable_var (cfg);
5725 /* FIXME: Is there a better way to do this?
5726 We need the variable live for the duration
5727 of the whole method. */
5728 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5732 /* add a check for this != NULL to inlined methods */
5733 if (is_virtual_call) {
5736 NEW_ARGLOAD (cfg, arg_ins, 0);
5737 MONO_ADD_INS (cfg->cbb, arg_ins);
5738 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5741 skip_dead_blocks = !dont_verify;
5742 if (skip_dead_blocks) {
5743 original_bb = bb = mono_basic_block_split (method, &error);
5744 if (!mono_error_ok (&error)) {
5745 mono_error_cleanup (&error);
5751 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5752 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5755 start_new_bblock = 0;
5758 if (cfg->method == method)
5759 cfg->real_offset = ip - header->code;
5761 cfg->real_offset = inline_offset;
5766 if (start_new_bblock) {
5767 bblock->cil_length = ip - bblock->cil_code;
5768 if (start_new_bblock == 2) {
5769 g_assert (ip == tblock->cil_code);
5771 GET_BBLOCK (cfg, tblock, ip);
5773 bblock->next_bb = tblock;
5776 start_new_bblock = 0;
5777 for (i = 0; i < bblock->in_scount; ++i) {
5778 if (cfg->verbose_level > 3)
5779 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5780 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5784 g_slist_free (class_inits);
5787 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5788 link_bblock (cfg, bblock, tblock);
5789 if (sp != stack_start) {
5790 handle_stack_args (cfg, stack_start, sp - stack_start);
5792 CHECK_UNVERIFIABLE (cfg);
5794 bblock->next_bb = tblock;
5797 for (i = 0; i < bblock->in_scount; ++i) {
5798 if (cfg->verbose_level > 3)
5799 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5800 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5803 g_slist_free (class_inits);
5808 if (skip_dead_blocks) {
5809 int ip_offset = ip - header->code;
5811 if (ip_offset == bb->end)
5815 int op_size = mono_opcode_size (ip, end);
5816 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5818 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5820 if (ip_offset + op_size == bb->end) {
5821 MONO_INST_NEW (cfg, ins, OP_NOP);
5822 MONO_ADD_INS (bblock, ins);
5823 start_new_bblock = 1;
5831 * Sequence points are points where the debugger can place a breakpoint.
5832 * Currently, we generate these automatically at points where the IL
5835 if (seq_points && sp == stack_start) {
5836 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5837 MONO_ADD_INS (cfg->cbb, ins);
5840 bblock->real_offset = cfg->real_offset;
5842 if ((cfg->method == method) && cfg->coverage_info) {
5843 guint32 cil_offset = ip - header->code;
5844 cfg->coverage_info->data [cil_offset].cil_code = ip;
5846 /* TODO: Use an increment here */
5847 #if defined(TARGET_X86)
5848 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5849 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5851 MONO_ADD_INS (cfg->cbb, ins);
5853 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5854 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5858 if (cfg->verbose_level > 3)
5859 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5863 if (cfg->keep_cil_nops)
5864 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5866 MONO_INST_NEW (cfg, ins, OP_NOP);
5868 MONO_ADD_INS (bblock, ins);
5871 if (should_insert_brekpoint (cfg->method))
5872 MONO_INST_NEW (cfg, ins, OP_BREAK);
5874 MONO_INST_NEW (cfg, ins, OP_NOP);
5876 MONO_ADD_INS (bblock, ins);
5882 CHECK_STACK_OVF (1);
5883 n = (*ip)-CEE_LDARG_0;
5885 EMIT_NEW_ARGLOAD (cfg, ins, n);
5893 CHECK_STACK_OVF (1);
5894 n = (*ip)-CEE_LDLOC_0;
5896 EMIT_NEW_LOCLOAD (cfg, ins, n);
5905 n = (*ip)-CEE_STLOC_0;
5908 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5910 emit_stloc_ir (cfg, sp, header, n);
5917 CHECK_STACK_OVF (1);
5920 EMIT_NEW_ARGLOAD (cfg, ins, n);
5926 CHECK_STACK_OVF (1);
5929 NEW_ARGLOADA (cfg, ins, n);
5930 MONO_ADD_INS (cfg->cbb, ins);
5940 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5942 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5947 CHECK_STACK_OVF (1);
5950 EMIT_NEW_LOCLOAD (cfg, ins, n);
5954 case CEE_LDLOCA_S: {
5955 unsigned char *tmp_ip;
5957 CHECK_STACK_OVF (1);
5958 CHECK_LOCAL (ip [1]);
5960 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5966 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5975 CHECK_LOCAL (ip [1]);
5976 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5978 emit_stloc_ir (cfg, sp, header, ip [1]);
5983 CHECK_STACK_OVF (1);
5984 EMIT_NEW_PCONST (cfg, ins, NULL);
5985 ins->type = STACK_OBJ;
5990 CHECK_STACK_OVF (1);
5991 EMIT_NEW_ICONST (cfg, ins, -1);
6004 CHECK_STACK_OVF (1);
6005 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6011 CHECK_STACK_OVF (1);
6013 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6019 CHECK_STACK_OVF (1);
6020 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6026 CHECK_STACK_OVF (1);
6027 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6028 ins->type = STACK_I8;
6029 ins->dreg = alloc_dreg (cfg, STACK_I8);
6031 ins->inst_l = (gint64)read64 (ip);
6032 MONO_ADD_INS (bblock, ins);
6038 gboolean use_aotconst = FALSE;
6040 #ifdef TARGET_POWERPC
6041 /* FIXME: Clean this up */
6042 if (cfg->compile_aot)
6043 use_aotconst = TRUE;
6046 /* FIXME: we should really allocate this only late in the compilation process */
6047 f = mono_domain_alloc (cfg->domain, sizeof (float));
6049 CHECK_STACK_OVF (1);
6055 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6057 dreg = alloc_freg (cfg);
6058 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6059 ins->type = STACK_R8;
6061 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6062 ins->type = STACK_R8;
6063 ins->dreg = alloc_dreg (cfg, STACK_R8);
6065 MONO_ADD_INS (bblock, ins);
6075 gboolean use_aotconst = FALSE;
6077 #ifdef TARGET_POWERPC
6078 /* FIXME: Clean this up */
6079 if (cfg->compile_aot)
6080 use_aotconst = TRUE;
6083 /* FIXME: we should really allocate this only late in the compilation process */
6084 d = mono_domain_alloc (cfg->domain, sizeof (double));
6086 CHECK_STACK_OVF (1);
6092 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6094 dreg = alloc_freg (cfg);
6095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6096 ins->type = STACK_R8;
6098 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6099 ins->type = STACK_R8;
6100 ins->dreg = alloc_dreg (cfg, STACK_R8);
6102 MONO_ADD_INS (bblock, ins);
6111 MonoInst *temp, *store;
6113 CHECK_STACK_OVF (1);
6117 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6118 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6120 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6123 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6136 if (sp [0]->type == STACK_R8)
6137 /* we need to pop the value from the x86 FP stack */
6138 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6147 if (stack_start != sp)
6149 token = read32 (ip + 1);
6150 /* FIXME: check the signature matches */
6151 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6153 if (!cmethod || mono_loader_get_last_error ())
6156 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6157 GENERIC_SHARING_FAILURE (CEE_JMP);
6159 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6160 CHECK_CFG_EXCEPTION;
6162 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6164 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6167 /* Handle tail calls similarly to calls */
6168 n = fsig->param_count + fsig->hasthis;
6170 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6171 call->method = cmethod;
6172 call->tail_call = TRUE;
6173 call->signature = mono_method_signature (cmethod);
6174 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6175 call->inst.inst_p0 = cmethod;
6176 for (i = 0; i < n; ++i)
6177 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6179 mono_arch_emit_call (cfg, call);
6180 MONO_ADD_INS (bblock, (MonoInst*)call);
6183 for (i = 0; i < num_args; ++i)
6184 /* Prevent arguments from being optimized away */
6185 arg_array [i]->flags |= MONO_INST_VOLATILE;
6187 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6188 ins = (MonoInst*)call;
6189 ins->inst_p0 = cmethod;
6190 MONO_ADD_INS (bblock, ins);
6194 start_new_bblock = 1;
6199 case CEE_CALLVIRT: {
6200 MonoInst *addr = NULL;
6201 MonoMethodSignature *fsig = NULL;
6203 int virtual = *ip == CEE_CALLVIRT;
6204 int calli = *ip == CEE_CALLI;
6205 gboolean pass_imt_from_rgctx = FALSE;
6206 MonoInst *imt_arg = NULL;
6207 gboolean pass_vtable = FALSE;
6208 gboolean pass_mrgctx = FALSE;
6209 MonoInst *vtable_arg = NULL;
6210 gboolean check_this = FALSE;
6211 gboolean supported_tail_call = FALSE;
6214 token = read32 (ip + 1);
6221 if (method->wrapper_type != MONO_WRAPPER_NONE)
6222 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6224 fsig = mono_metadata_parse_signature (image, token);
6226 n = fsig->param_count + fsig->hasthis;
6228 if (method->dynamic && fsig->pinvoke) {
6232 * This is a call through a function pointer using a pinvoke
6233 * signature. Have to create a wrapper and call that instead.
6234 * FIXME: This is very slow, need to create a wrapper at JIT time
6235 * instead based on the signature.
6237 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6238 EMIT_NEW_PCONST (cfg, args [1], fsig);
6240 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6243 MonoMethod *cil_method;
6245 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6246 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6247 cil_method = cmethod;
6248 } else if (constrained_call) {
6249 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6251 * This is needed since get_method_constrained can't find
6252 * the method in klass representing a type var.
6253 * The type var is guaranteed to be a reference type in this
6256 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6257 cil_method = cmethod;
6258 g_assert (!cmethod->klass->valuetype);
6260 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6263 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6264 cil_method = cmethod;
6267 if (!cmethod || mono_loader_get_last_error ())
6269 if (!dont_verify && !cfg->skip_visibility) {
6270 MonoMethod *target_method = cil_method;
6271 if (method->is_inflated) {
6272 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6274 if (!mono_method_can_access_method (method_definition, target_method) &&
6275 !mono_method_can_access_method (method, cil_method))
6276 METHOD_ACCESS_FAILURE;
6279 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6280 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6282 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6283 /* MS.NET seems to silently convert this to a callvirt */
6288 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6289 * converts to a callvirt.
6291 * tests/bug-515884.il is an example of this behavior
6293 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6294 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6295 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6299 if (!cmethod->klass->inited)
6300 if (!mono_class_init (cmethod->klass))
6303 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6304 mini_class_is_system_array (cmethod->klass)) {
6305 array_rank = cmethod->klass->rank;
6306 fsig = mono_method_signature (cmethod);
6308 fsig = mono_method_signature (cmethod);
6313 if (fsig->pinvoke) {
6314 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6315 check_for_pending_exc, FALSE);
6316 fsig = mono_method_signature (wrapper);
6317 } else if (constrained_call) {
6318 fsig = mono_method_signature (cmethod);
6320 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6324 mono_save_token_info (cfg, image, token, cil_method);
6326 n = fsig->param_count + fsig->hasthis;
6328 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6329 if (check_linkdemand (cfg, method, cmethod))
6331 CHECK_CFG_EXCEPTION;
6334 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6335 g_assert_not_reached ();
6338 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6341 if (!cfg->generic_sharing_context && cmethod)
6342 g_assert (!mono_method_check_context_used (cmethod));
6346 //g_assert (!virtual || fsig->hasthis);
6350 if (constrained_call) {
6352 * We have the `constrained.' prefix opcode.
6354 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6356 * The type parameter is instantiated as a valuetype,
6357 * but that type doesn't override the method we're
6358 * calling, so we need to box `this'.
6360 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6361 ins->klass = constrained_call;
6362 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6363 CHECK_CFG_EXCEPTION;
6364 } else if (!constrained_call->valuetype) {
6365 int dreg = alloc_preg (cfg);
6368 * The type parameter is instantiated as a reference
6369 * type. We have a managed pointer on the stack, so
6370 * we need to dereference it here.
6372 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6373 ins->type = STACK_OBJ;
6375 } else if (cmethod->klass->valuetype)
6377 constrained_call = NULL;
6380 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6384 * If the callee is a shared method, then its static cctor
6385 * might not get called after the call was patched.
6387 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6388 emit_generic_class_init (cfg, cmethod->klass);
6389 CHECK_TYPELOAD (cmethod->klass);
6392 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6393 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6394 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6395 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6396 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6399 * Pass vtable iff target method might
6400 * be shared, which means that sharing
6401 * is enabled for its class and its
6402 * context is sharable (and it's not a
6405 if (sharing_enabled && context_sharable &&
6406 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6410 if (cmethod && mini_method_get_context (cmethod) &&
6411 mini_method_get_context (cmethod)->method_inst) {
6412 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6413 MonoGenericContext *context = mini_method_get_context (cmethod);
6414 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6416 g_assert (!pass_vtable);
6418 if (sharing_enabled && context_sharable)
6422 if (cfg->generic_sharing_context && cmethod) {
6423 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6425 context_used = mono_method_check_context_used (cmethod);
6427 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6428 /* Generic method interface
6429 calls are resolved via a
6430 helper function and don't
6432 if (!cmethod_context || !cmethod_context->method_inst)
6433 pass_imt_from_rgctx = TRUE;
6437 * If a shared method calls another
6438 * shared method then the caller must
6439 * have a generic sharing context
6440 * because the magic trampoline
6441 * requires it. FIXME: We shouldn't
6442 * have to force the vtable/mrgctx
6443 * variable here. Instead there
6444 * should be a flag in the cfg to
6445 * request a generic sharing context.
6448 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6449 mono_get_vtable_var (cfg);
6454 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6456 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6458 CHECK_TYPELOAD (cmethod->klass);
6459 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6464 g_assert (!vtable_arg);
6466 if (!cfg->compile_aot) {
6468 * emit_get_rgctx_method () calls mono_class_vtable () so check
6469 * for type load errors before.
6471 mono_class_setup_vtable (cmethod->klass);
6472 CHECK_TYPELOAD (cmethod->klass);
6475 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6477 /* !marshalbyref is needed to properly handle generic methods + remoting */
6478 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6479 MONO_METHOD_IS_FINAL (cmethod)) &&
6480 !cmethod->klass->marshalbyref) {
6487 if (pass_imt_from_rgctx) {
6488 g_assert (!pass_vtable);
6491 imt_arg = emit_get_rgctx_method (cfg, context_used,
6492 cmethod, MONO_RGCTX_INFO_METHOD);
6496 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6498 /* Calling virtual generic methods */
6499 if (cmethod && virtual &&
6500 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6501 !(MONO_METHOD_IS_FINAL (cmethod) &&
6502 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6503 mono_method_signature (cmethod)->generic_param_count) {
6504 MonoInst *this_temp, *this_arg_temp, *store;
6505 MonoInst *iargs [4];
6507 g_assert (mono_method_signature (cmethod)->is_inflated);
6509 /* Prevent inlining of methods that contain indirect calls */
6512 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6513 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6514 g_assert (!imt_arg);
6516 g_assert (cmethod->is_inflated);
6517 imt_arg = emit_get_rgctx_method (cfg, context_used,
6518 cmethod, MONO_RGCTX_INFO_METHOD);
6519 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6523 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6524 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6525 MONO_ADD_INS (bblock, store);
6527 /* FIXME: This should be a managed pointer */
6528 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6530 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6531 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6532 cmethod, MONO_RGCTX_INFO_METHOD);
6533 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6534 addr = mono_emit_jit_icall (cfg,
6535 mono_helper_compile_generic_method, iargs);
6537 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6539 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6542 if (!MONO_TYPE_IS_VOID (fsig->ret))
6543 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6545 CHECK_CFG_EXCEPTION;
6552 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6553 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6555 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6559 /* FIXME: runtime generic context pointer for jumps? */
6560 /* FIXME: handle this for generic sharing eventually */
6561 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6564 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6567 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6568 /* Handle tail calls similarly to calls */
6569 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6571 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6572 call->tail_call = TRUE;
6573 call->method = cmethod;
6574 call->signature = mono_method_signature (cmethod);
6577 * We implement tail calls by storing the actual arguments into the
6578 * argument variables, then emitting a CEE_JMP.
6580 for (i = 0; i < n; ++i) {
6581 /* Prevent argument from being register allocated */
6582 arg_array [i]->flags |= MONO_INST_VOLATILE;
6583 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6587 ins = (MonoInst*)call;
6588 ins->inst_p0 = cmethod;
6589 ins->inst_p1 = arg_array [0];
6590 MONO_ADD_INS (bblock, ins);
6591 link_bblock (cfg, bblock, end_bblock);
6592 start_new_bblock = 1;
6594 CHECK_CFG_EXCEPTION;
6596 /* skip CEE_RET as well */
6602 /* Conversion to a JIT intrinsic */
6603 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6605 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6606 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6611 CHECK_CFG_EXCEPTION;
6619 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6620 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6621 mono_method_check_inlining (cfg, cmethod) &&
6622 !g_list_find (dont_inline, cmethod)) {
6624 gboolean allways = FALSE;
6626 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6627 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6628 /* Prevent inlining of methods that call wrappers */
6630 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6634 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6636 cfg->real_offset += 5;
6639 if (!MONO_TYPE_IS_VOID (fsig->ret))
6640 /* *sp is already set by inline_method */
6643 inline_costs += costs;
6649 inline_costs += 10 * num_calls++;
6651 /* Tail recursion elimination */
6652 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6653 gboolean has_vtargs = FALSE;
6656 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6659 /* keep it simple */
6660 for (i = fsig->param_count - 1; i >= 0; i--) {
6661 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6666 for (i = 0; i < n; ++i)
6667 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6668 MONO_INST_NEW (cfg, ins, OP_BR);
6669 MONO_ADD_INS (bblock, ins);
6670 tblock = start_bblock->out_bb [0];
6671 link_bblock (cfg, bblock, tblock);
6672 ins->inst_target_bb = tblock;
6673 start_new_bblock = 1;
6675 /* skip the CEE_RET, too */
6676 if (ip_in_bb (cfg, bblock, ip + 5))
6686 /* Generic sharing */
6687 /* FIXME: only do this for generic methods if
6688 they are not shared! */
6689 if (context_used && !imt_arg && !array_rank &&
6690 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6691 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6692 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6693 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6696 g_assert (cfg->generic_sharing_context && cmethod);
6700 * We are compiling a call to a
6701 * generic method from shared code,
6702 * which means that we have to look up
6703 * the method in the rgctx and do an
6706 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6709 /* Indirect calls */
6711 g_assert (!imt_arg);
6713 if (*ip == CEE_CALL)
6714 g_assert (context_used);
6715 else if (*ip == CEE_CALLI)
6716 g_assert (!vtable_arg);
6718 /* FIXME: what the hell is this??? */
6719 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6720 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6722 /* Prevent inlining of methods with indirect calls */
6727 int rgctx_reg = mono_alloc_preg (cfg);
6729 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6730 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6731 call = (MonoCallInst*)ins;
6732 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6734 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6736 * Instead of emitting an indirect call, emit a direct call
6737 * with the contents of the aotconst as the patch info.
6739 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6741 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6742 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6745 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6748 if (!MONO_TYPE_IS_VOID (fsig->ret))
6749 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6751 CHECK_CFG_EXCEPTION;
6762 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6763 if (sp [fsig->param_count]->type == STACK_OBJ) {
6764 MonoInst *iargs [2];
6767 iargs [1] = sp [fsig->param_count];
6769 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6772 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6773 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6774 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6775 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6777 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6780 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6781 if (!cmethod->klass->element_class->valuetype && !readonly)
6782 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6783 CHECK_TYPELOAD (cmethod->klass);
6786 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6789 g_assert_not_reached ();
6792 CHECK_CFG_EXCEPTION;
6799 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6801 if (!MONO_TYPE_IS_VOID (fsig->ret))
6802 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6804 CHECK_CFG_EXCEPTION;
6814 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6816 } else if (imt_arg) {
6817 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6819 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6822 if (!MONO_TYPE_IS_VOID (fsig->ret))
6823 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6825 CHECK_CFG_EXCEPTION;
6832 if (cfg->method != method) {
6833 /* return from inlined method */
6835 * If in_count == 0, that means the ret is unreachable due to
5836 * being preceded by a throw. In that case, inline_method () will
6837 * handle setting the return value
6838 * (test case: test_0_inline_throw ()).
6840 if (return_var && cfg->cbb->in_count) {
6844 //g_assert (returnvar != -1);
6845 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6846 cfg->ret_var_set = TRUE;
6850 MonoType *ret_type = mono_method_signature (method)->ret;
6854				 * Place a seq point here too even though the IL stack is not
6855 * empty, so a step over on
6858 * will work correctly.
6860 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6861 MONO_ADD_INS (cfg->cbb, ins);
6864 g_assert (!return_var);
6867 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6870 if (!cfg->vret_addr) {
6873 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6875 EMIT_NEW_RETLOADA (cfg, ret_addr);
6877 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6878 ins->klass = mono_class_from_mono_type (ret_type);
6881 #ifdef MONO_ARCH_SOFT_FLOAT
6882 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6883 MonoInst *iargs [1];
6887 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6888 mono_arch_emit_setret (cfg, method, conv);
6890 mono_arch_emit_setret (cfg, method, *sp);
6893 mono_arch_emit_setret (cfg, method, *sp);
6898 if (sp != stack_start)
6900 MONO_INST_NEW (cfg, ins, OP_BR);
6902 ins->inst_target_bb = end_bblock;
6903 MONO_ADD_INS (bblock, ins);
6904 link_bblock (cfg, bblock, end_bblock);
6905 start_new_bblock = 1;
6909 MONO_INST_NEW (cfg, ins, OP_BR);
6911 target = ip + 1 + (signed char)(*ip);
6913 GET_BBLOCK (cfg, tblock, target);
6914 link_bblock (cfg, bblock, tblock);
6915 ins->inst_target_bb = tblock;
6916 if (sp != stack_start) {
6917 handle_stack_args (cfg, stack_start, sp - stack_start);
6919 CHECK_UNVERIFIABLE (cfg);
6921 MONO_ADD_INS (bblock, ins);
6922 start_new_bblock = 1;
6923 inline_costs += BRANCH_COST;
6937 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6939 target = ip + 1 + *(signed char*)ip;
6945 inline_costs += BRANCH_COST;
6949 MONO_INST_NEW (cfg, ins, OP_BR);
6952 target = ip + 4 + (gint32)read32(ip);
6954 GET_BBLOCK (cfg, tblock, target);
6955 link_bblock (cfg, bblock, tblock);
6956 ins->inst_target_bb = tblock;
6957 if (sp != stack_start) {
6958 handle_stack_args (cfg, stack_start, sp - stack_start);
6960 CHECK_UNVERIFIABLE (cfg);
6963 MONO_ADD_INS (bblock, ins);
6965 start_new_bblock = 1;
6966 inline_costs += BRANCH_COST;
6973 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6974 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6975 guint32 opsize = is_short ? 1 : 4;
6977 CHECK_OPSIZE (opsize);
6979 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6982 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6987 GET_BBLOCK (cfg, tblock, target);
6988 link_bblock (cfg, bblock, tblock);
6989 GET_BBLOCK (cfg, tblock, ip);
6990 link_bblock (cfg, bblock, tblock);
6992 if (sp != stack_start) {
6993 handle_stack_args (cfg, stack_start, sp - stack_start);
6994 CHECK_UNVERIFIABLE (cfg);
6997 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6998 cmp->sreg1 = sp [0]->dreg;
6999 type_from_op (cmp, sp [0], NULL);
7002 #if SIZEOF_REGISTER == 4
7003 if (cmp->opcode == OP_LCOMPARE_IMM) {
7004 /* Convert it to OP_LCOMPARE */
7005 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7006 ins->type = STACK_I8;
7007 ins->dreg = alloc_dreg (cfg, STACK_I8);
7009 MONO_ADD_INS (bblock, ins);
7010 cmp->opcode = OP_LCOMPARE;
7011 cmp->sreg2 = ins->dreg;
7014 MONO_ADD_INS (bblock, cmp);
7016 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7017 type_from_op (ins, sp [0], NULL);
7018 MONO_ADD_INS (bblock, ins);
7019 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7020 GET_BBLOCK (cfg, tblock, target);
7021 ins->inst_true_bb = tblock;
7022 GET_BBLOCK (cfg, tblock, ip);
7023 ins->inst_false_bb = tblock;
7024 start_new_bblock = 2;
7027 inline_costs += BRANCH_COST;
7042 MONO_INST_NEW (cfg, ins, *ip);
7044 target = ip + 4 + (gint32)read32(ip);
7050 inline_costs += BRANCH_COST;
7054 MonoBasicBlock **targets;
7055 MonoBasicBlock *default_bblock;
7056 MonoJumpInfoBBTable *table;
7057 int offset_reg = alloc_preg (cfg);
7058 int target_reg = alloc_preg (cfg);
7059 int table_reg = alloc_preg (cfg);
7060 int sum_reg = alloc_preg (cfg);
7061 gboolean use_op_switch;
7065 n = read32 (ip + 1);
7068 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7072 CHECK_OPSIZE (n * sizeof (guint32));
7073 target = ip + n * sizeof (guint32);
7075 GET_BBLOCK (cfg, default_bblock, target);
7076 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7078 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7079 for (i = 0; i < n; ++i) {
7080 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7081 targets [i] = tblock;
7082 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7086 if (sp != stack_start) {
7088 * Link the current bb with the targets as well, so handle_stack_args
7089 * will set their in_stack correctly.
7091 link_bblock (cfg, bblock, default_bblock);
7092 for (i = 0; i < n; ++i)
7093 link_bblock (cfg, bblock, targets [i]);
7095 handle_stack_args (cfg, stack_start, sp - stack_start);
7097 CHECK_UNVERIFIABLE (cfg);
7100 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7104 for (i = 0; i < n; ++i)
7105 link_bblock (cfg, bblock, targets [i]);
7107 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7108 table->table = targets;
7109 table->table_size = n;
7111 use_op_switch = FALSE;
7113 /* ARM implements SWITCH statements differently */
7114 /* FIXME: Make it use the generic implementation */
7115 if (!cfg->compile_aot)
7116 use_op_switch = TRUE;
7119 if (COMPILE_LLVM (cfg))
7120 use_op_switch = TRUE;
7122 cfg->cbb->has_jump_table = 1;
7124 if (use_op_switch) {
7125 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7126 ins->sreg1 = src1->dreg;
7127 ins->inst_p0 = table;
7128 ins->inst_many_bb = targets;
7129 ins->klass = GUINT_TO_POINTER (n);
7130 MONO_ADD_INS (cfg->cbb, ins);
7132 if (sizeof (gpointer) == 8)
7133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7137 #if SIZEOF_REGISTER == 8
7138 /* The upper word might not be zero, and we add it to a 64 bit address later */
7139 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7142 if (cfg->compile_aot) {
7143 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7145 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7146 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7147 ins->inst_p0 = table;
7148 ins->dreg = table_reg;
7149 MONO_ADD_INS (cfg->cbb, ins);
7152 /* FIXME: Use load_memindex */
7153 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7155 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7157 start_new_bblock = 1;
7158 inline_costs += (BRANCH_COST * 2);
7178 dreg = alloc_freg (cfg);
7181 dreg = alloc_lreg (cfg);
7184 dreg = alloc_preg (cfg);
7187 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7188 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7189 ins->flags |= ins_flag;
7191 MONO_ADD_INS (bblock, ins);
7206 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7207 ins->flags |= ins_flag;
7209 MONO_ADD_INS (bblock, ins);
7211 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7212 emit_write_barrier (cfg, sp [0], sp [1], -1);
7221 MONO_INST_NEW (cfg, ins, (*ip));
7223 ins->sreg1 = sp [0]->dreg;
7224 ins->sreg2 = sp [1]->dreg;
7225 type_from_op (ins, sp [0], sp [1]);
7227 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7229 /* Use the immediate opcodes if possible */
7230 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7231 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7232 if (imm_opcode != -1) {
7233 ins->opcode = imm_opcode;
7234 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7237 sp [1]->opcode = OP_NOP;
7241 MONO_ADD_INS ((cfg)->cbb, (ins));
7243 *sp++ = mono_decompose_opcode (cfg, ins);
7260 MONO_INST_NEW (cfg, ins, (*ip));
7262 ins->sreg1 = sp [0]->dreg;
7263 ins->sreg2 = sp [1]->dreg;
7264 type_from_op (ins, sp [0], sp [1]);
7266 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7267 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7269 /* FIXME: Pass opcode to is_inst_imm */
7271 /* Use the immediate opcodes if possible */
7272 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7275 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7276 if (imm_opcode != -1) {
7277 ins->opcode = imm_opcode;
7278 if (sp [1]->opcode == OP_I8CONST) {
7279 #if SIZEOF_REGISTER == 8
7280 ins->inst_imm = sp [1]->inst_l;
7282 ins->inst_ls_word = sp [1]->inst_ls_word;
7283 ins->inst_ms_word = sp [1]->inst_ms_word;
7287 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7290 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7291 if (sp [1]->next == NULL)
7292 sp [1]->opcode = OP_NOP;
7295 MONO_ADD_INS ((cfg)->cbb, (ins));
7297 *sp++ = mono_decompose_opcode (cfg, ins);
7310 case CEE_CONV_OVF_I8:
7311 case CEE_CONV_OVF_U8:
7315 /* Special case this earlier so we have long constants in the IR */
7316 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7317 int data = sp [-1]->inst_c0;
7318 sp [-1]->opcode = OP_I8CONST;
7319 sp [-1]->type = STACK_I8;
7320 #if SIZEOF_REGISTER == 8
7321 if ((*ip) == CEE_CONV_U8)
7322 sp [-1]->inst_c0 = (guint32)data;
7324 sp [-1]->inst_c0 = data;
7326 sp [-1]->inst_ls_word = data;
7327 if ((*ip) == CEE_CONV_U8)
7328 sp [-1]->inst_ms_word = 0;
7330 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7332 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7339 case CEE_CONV_OVF_I4:
7340 case CEE_CONV_OVF_I1:
7341 case CEE_CONV_OVF_I2:
7342 case CEE_CONV_OVF_I:
7343 case CEE_CONV_OVF_U:
7346 if (sp [-1]->type == STACK_R8) {
7347 ADD_UNOP (CEE_CONV_OVF_I8);
7354 case CEE_CONV_OVF_U1:
7355 case CEE_CONV_OVF_U2:
7356 case CEE_CONV_OVF_U4:
7359 if (sp [-1]->type == STACK_R8) {
7360 ADD_UNOP (CEE_CONV_OVF_U8);
7367 case CEE_CONV_OVF_I1_UN:
7368 case CEE_CONV_OVF_I2_UN:
7369 case CEE_CONV_OVF_I4_UN:
7370 case CEE_CONV_OVF_I8_UN:
7371 case CEE_CONV_OVF_U1_UN:
7372 case CEE_CONV_OVF_U2_UN:
7373 case CEE_CONV_OVF_U4_UN:
7374 case CEE_CONV_OVF_U8_UN:
7375 case CEE_CONV_OVF_I_UN:
7376 case CEE_CONV_OVF_U_UN:
7383 CHECK_CFG_EXCEPTION;
7387 case CEE_ADD_OVF_UN:
7389 case CEE_MUL_OVF_UN:
7391 case CEE_SUB_OVF_UN:
7399 token = read32 (ip + 1);
7400 klass = mini_get_class (method, token, generic_context);
7401 CHECK_TYPELOAD (klass);
7403 if (generic_class_is_reference_type (cfg, klass)) {
7404 MonoInst *store, *load;
7405 int dreg = alloc_preg (cfg);
7407 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7408 load->flags |= ins_flag;
7409 MONO_ADD_INS (cfg->cbb, load);
7411 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7412 store->flags |= ins_flag;
7413 MONO_ADD_INS (cfg->cbb, store);
7415 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7416 emit_write_barrier (cfg, sp [0], sp [1], -1);
7418 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7430 token = read32 (ip + 1);
7431 klass = mini_get_class (method, token, generic_context);
7432 CHECK_TYPELOAD (klass);
7434 /* Optimize the common ldobj+stloc combination */
7444 loc_index = ip [5] - CEE_STLOC_0;
7451 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7452 CHECK_LOCAL (loc_index);
7454 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7455 ins->dreg = cfg->locals [loc_index]->dreg;
7461 /* Optimize the ldobj+stobj combination */
7462 /* The reference case ends up being a load+store anyway */
7463 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7468 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7475 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7484 CHECK_STACK_OVF (1);
7486 n = read32 (ip + 1);
7488 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7489 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7490 ins->type = STACK_OBJ;
7493 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7494 MonoInst *iargs [1];
7496 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7497 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7499 if (cfg->opt & MONO_OPT_SHARED) {
7500 MonoInst *iargs [3];
7502 if (cfg->compile_aot) {
7503 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7505 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7506 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7507 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7508 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7509 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7511 if (bblock->out_of_line) {
7512 MonoInst *iargs [2];
7514 if (image == mono_defaults.corlib) {
7516 * Avoid relocations in AOT and save some space by using a
7517 * version of helper_ldstr specialized to mscorlib.
7519 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7520 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7522 /* Avoid creating the string object */
7523 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7524 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7525 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7529 if (cfg->compile_aot) {
7530 NEW_LDSTRCONST (cfg, ins, image, n);
7532 MONO_ADD_INS (bblock, ins);
7535 NEW_PCONST (cfg, ins, NULL);
7536 ins->type = STACK_OBJ;
7537 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7539 MONO_ADD_INS (bblock, ins);
7548 MonoInst *iargs [2];
7549 MonoMethodSignature *fsig;
7552 MonoInst *vtable_arg = NULL;
7555 token = read32 (ip + 1);
7556 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7557 if (!cmethod || mono_loader_get_last_error ())
7559 fsig = mono_method_get_signature (cmethod, image, token);
7563 mono_save_token_info (cfg, image, token, cmethod);
7565 if (!mono_class_init (cmethod->klass))
7568 if (cfg->generic_sharing_context)
7569 context_used = mono_method_check_context_used (cmethod);
7571 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7572 if (check_linkdemand (cfg, method, cmethod))
7574 CHECK_CFG_EXCEPTION;
7575 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7576 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7579 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7580 emit_generic_class_init (cfg, cmethod->klass);
7581 CHECK_TYPELOAD (cmethod->klass);
7584 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7585 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7586 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7587 mono_class_vtable (cfg->domain, cmethod->klass);
7588 CHECK_TYPELOAD (cmethod->klass);
7590 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7591 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7594 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7595 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7597 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7599 CHECK_TYPELOAD (cmethod->klass);
7600 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7605 n = fsig->param_count;
7609 * Generate smaller code for the common newobj <exception> instruction in
7610 * argument checking code.
7612 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7613 is_exception_class (cmethod->klass) && n <= 2 &&
7614 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7615 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7616 MonoInst *iargs [3];
7618 g_assert (!vtable_arg);
7622 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7625 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7629 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7634 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7637 g_assert_not_reached ();
7645 /* move the args to allow room for 'this' in the first position */
7651 /* check_call_signature () requires sp[0] to be set */
7652 this_ins.type = STACK_OBJ;
7654 if (check_call_signature (cfg, fsig, sp))
7659 if (mini_class_is_system_array (cmethod->klass)) {
7660 g_assert (!vtable_arg);
7662 *sp = emit_get_rgctx_method (cfg, context_used,
7663 cmethod, MONO_RGCTX_INFO_METHOD);
7665 /* Avoid varargs in the common case */
7666 if (fsig->param_count == 1)
7667 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7668 else if (fsig->param_count == 2)
7669 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7670 else if (fsig->param_count == 3)
7671 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7673 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7674 } else if (cmethod->string_ctor) {
7675 g_assert (!context_used);
7676 g_assert (!vtable_arg);
7677 /* we simply pass a null pointer */
7678 EMIT_NEW_PCONST (cfg, *sp, NULL);
7679 /* now call the string ctor */
7680 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7682 MonoInst* callvirt_this_arg = NULL;
7684 if (cmethod->klass->valuetype) {
7685 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7686 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7687 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7692 * The code generated by mini_emit_virtual_call () expects
7693 * iargs [0] to be a boxed instance, but luckily the vcall
7694 * will be transformed into a normal call there.
7696 } else if (context_used) {
7697 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7700 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7702 CHECK_TYPELOAD (cmethod->klass);
7705 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7706 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7707 * As a workaround, we call class cctors before allocating objects.
7709 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7710 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7711 if (cfg->verbose_level > 2)
7712 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7713 class_inits = g_slist_prepend (class_inits, vtable);
7716 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7719 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7722 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7724 /* Now call the actual ctor */
7725 /* Avoid virtual calls to ctors if possible */
7726 if (cmethod->klass->marshalbyref)
7727 callvirt_this_arg = sp [0];
7730 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7731 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7732 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7737 CHECK_CFG_EXCEPTION;
7742 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7743 mono_method_check_inlining (cfg, cmethod) &&
7744 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7745 !g_list_find (dont_inline, cmethod)) {
7748 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7749 cfg->real_offset += 5;
7752 inline_costs += costs - 5;
7755 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7757 } else if (context_used &&
7758 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7759 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7760 MonoInst *cmethod_addr;
7762 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7763 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7765 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7768 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7769 callvirt_this_arg, NULL, vtable_arg);
7773 if (alloc == NULL) {
7775 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7776 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7790 token = read32 (ip + 1);
7791 klass = mini_get_class (method, token, generic_context);
7792 CHECK_TYPELOAD (klass);
7793 if (sp [0]->type != STACK_OBJ)
7796 if (cfg->generic_sharing_context)
7797 context_used = mono_class_check_context_used (klass);
7799 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7806 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7808 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7812 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7813 MonoMethod *mono_castclass;
7814 MonoInst *iargs [1];
7817 mono_castclass = mono_marshal_get_castclass (klass);
7820 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7821 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7822 g_assert (costs > 0);
7825 cfg->real_offset += 5;
7830 inline_costs += costs;
7833 ins = handle_castclass (cfg, klass, *sp, context_used);
7834 CHECK_CFG_EXCEPTION;
7844 token = read32 (ip + 1);
7845 klass = mini_get_class (method, token, generic_context);
7846 CHECK_TYPELOAD (klass);
7847 if (sp [0]->type != STACK_OBJ)
7850 if (cfg->generic_sharing_context)
7851 context_used = mono_class_check_context_used (klass);
7853 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7860 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7862 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7866 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7867 MonoMethod *mono_isinst;
7868 MonoInst *iargs [1];
7871 mono_isinst = mono_marshal_get_isinst (klass);
7874 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7875 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7876 g_assert (costs > 0);
7879 cfg->real_offset += 5;
7884 inline_costs += costs;
7887 ins = handle_isinst (cfg, klass, *sp, context_used);
7888 CHECK_CFG_EXCEPTION;
7895 case CEE_UNBOX_ANY: {
7899 token = read32 (ip + 1);
7900 klass = mini_get_class (method, token, generic_context);
7901 CHECK_TYPELOAD (klass);
7903 mono_save_token_info (cfg, image, token, klass);
7905 if (cfg->generic_sharing_context)
7906 context_used = mono_class_check_context_used (klass);
7908 if (generic_class_is_reference_type (cfg, klass)) {
7909 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7910 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7911 MonoMethod *mono_castclass;
7912 MonoInst *iargs [1];
7915 mono_castclass = mono_marshal_get_castclass (klass);
7918 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7919 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7921 g_assert (costs > 0);
7924 cfg->real_offset += 5;
7928 inline_costs += costs;
7930 ins = handle_castclass (cfg, klass, *sp, context_used);
7931 CHECK_CFG_EXCEPTION;
7939 if (mono_class_is_nullable (klass)) {
7940 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7947 ins = handle_unbox (cfg, klass, sp, context_used);
7953 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7966 token = read32 (ip + 1);
7967 klass = mini_get_class (method, token, generic_context);
7968 CHECK_TYPELOAD (klass);
7970 mono_save_token_info (cfg, image, token, klass);
7972 if (cfg->generic_sharing_context)
7973 context_used = mono_class_check_context_used (klass);
7975 if (generic_class_is_reference_type (cfg, klass)) {
7981 if (klass == mono_defaults.void_class)
7983 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7985 /* frequent check in generic code: box (struct), brtrue */
7987 // FIXME: LLVM can't handle the inconsistent bb linking
7988 if (!mono_class_is_nullable (klass) &&
7989 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
7990 (ip [5] == CEE_BRTRUE ||
7991 ip [5] == CEE_BRTRUE_S ||
7992 ip [5] == CEE_BRFALSE ||
7993 ip [5] == CEE_BRFALSE_S)) {
7994 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
7996 MonoBasicBlock *true_bb, *false_bb;
8000 if (cfg->verbose_level > 3) {
8001 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8002 printf ("<box+brtrue opt>\n");
8010 target = ip + 1 + (signed char)(*ip);
8017 target = ip + 4 + (gint)(read32 (ip));
8021 g_assert_not_reached ();
8025 * We need to link both bblocks, since it is needed for handling stack
8026 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8027 * Branching to only one of them would lead to inconsistencies, so
8028 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8030 GET_BBLOCK (cfg, true_bb, target);
8031 GET_BBLOCK (cfg, false_bb, ip);
8033 mono_link_bblock (cfg, cfg->cbb, true_bb);
8034 mono_link_bblock (cfg, cfg->cbb, false_bb);
8036 if (sp != stack_start) {
8037 handle_stack_args (cfg, stack_start, sp - stack_start);
8039 CHECK_UNVERIFIABLE (cfg);
8042 if (COMPILE_LLVM (cfg)) {
8043 dreg = alloc_ireg (cfg);
8044 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8047 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8049 /* The JIT can't eliminate the iconst+compare */
8050 MONO_INST_NEW (cfg, ins, OP_BR);
8051 ins->inst_target_bb = is_true ? true_bb : false_bb;
8052 MONO_ADD_INS (cfg->cbb, ins);
8055 start_new_bblock = 1;
8059 *sp++ = handle_box (cfg, val, klass, context_used);
8061 CHECK_CFG_EXCEPTION;
8070 token = read32 (ip + 1);
8071 klass = mini_get_class (method, token, generic_context);
8072 CHECK_TYPELOAD (klass);
8074 mono_save_token_info (cfg, image, token, klass);
8076 if (cfg->generic_sharing_context)
8077 context_used = mono_class_check_context_used (klass);
8079 if (mono_class_is_nullable (klass)) {
8082 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8083 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8087 ins = handle_unbox (cfg, klass, sp, context_used);
8097 MonoClassField *field;
8101 if (*ip == CEE_STFLD) {
8108 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8110 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8113 token = read32 (ip + 1);
8114 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8115 field = mono_method_get_wrapper_data (method, token);
8116 klass = field->parent;
8119 field = mono_field_from_token (image, token, &klass, generic_context);
8123 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8124 FIELD_ACCESS_FAILURE;
8125 mono_class_init (klass);
8127 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8128 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8129 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8130 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8133 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8134 if (*ip == CEE_STFLD) {
8135 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8137 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8138 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8139 MonoInst *iargs [5];
8142 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8143 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8144 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8148 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8149 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8150 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8151 g_assert (costs > 0);
8153 cfg->real_offset += 5;
8156 inline_costs += costs;
8158 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8163 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8165 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8166 if (sp [0]->opcode != OP_LDADDR)
8167 store->flags |= MONO_INST_FAULT;
8169 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8170 /* insert call to write barrier */
8174 dreg = alloc_preg (cfg);
8175 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8176 emit_write_barrier (cfg, ptr, sp [1], -1);
8179 store->flags |= ins_flag;
8186 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8187 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8188 MonoInst *iargs [4];
8191 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8192 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8193 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8194 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8195 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8196 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8198 g_assert (costs > 0);
8200 cfg->real_offset += 5;
8204 inline_costs += costs;
8206 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8210 if (sp [0]->type == STACK_VTYPE) {
8213 /* Have to compute the address of the variable */
8215 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8217 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8219 g_assert (var->klass == klass);
8221 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8225 if (*ip == CEE_LDFLDA) {
8226 if (sp [0]->type == STACK_OBJ) {
8227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8228 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8231 dreg = alloc_preg (cfg);
8233 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8234 ins->klass = mono_class_from_mono_type (field->type);
8235 ins->type = STACK_MP;
8240 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8242 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8243 load->flags |= ins_flag;
8244 if (sp [0]->opcode != OP_LDADDR)
8245 load->flags |= MONO_INST_FAULT;
8256 MonoClassField *field;
8257 gpointer addr = NULL;
8258 gboolean is_special_static;
8261 token = read32 (ip + 1);
8263 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8264 field = mono_method_get_wrapper_data (method, token);
8265 klass = field->parent;
8268 field = mono_field_from_token (image, token, &klass, generic_context);
8271 mono_class_init (klass);
8272 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8273 FIELD_ACCESS_FAILURE;
8275			/* if the class is Critical then transparent code cannot access its fields */
8276 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8277 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8280 * We can only support shared generic static
8281 * field access on architectures where the
8282 * trampoline code has been extended to handle
8283 * the generic class init.
8285 #ifndef MONO_ARCH_VTABLE_REG
8286 GENERIC_SHARING_FAILURE (*ip);
8289 if (cfg->generic_sharing_context)
8290 context_used = mono_class_check_context_used (klass);
8292 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8294 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8295 * to be called here.
8297 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8298 mono_class_vtable (cfg->domain, klass);
8299 CHECK_TYPELOAD (klass);
8301 mono_domain_lock (cfg->domain);
8302 if (cfg->domain->special_static_fields)
8303 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8304 mono_domain_unlock (cfg->domain);
8306 is_special_static = mono_class_field_is_special_static (field);
8308 /* Generate IR to compute the field address */
8309 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8311 * Fast access to TLS data
8312 * Inline version of get_thread_static_data () in
8316 int idx, static_data_reg, array_reg, dreg;
8317 MonoInst *thread_ins;
8319 // offset &= 0x7fffffff;
8320 // idx = (offset >> 24) - 1;
8321 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8323 thread_ins = mono_get_thread_intrinsic (cfg);
8324 MONO_ADD_INS (cfg->cbb, thread_ins);
8325 static_data_reg = alloc_ireg (cfg);
8326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8328 if (cfg->compile_aot) {
8329 int offset_reg, offset2_reg, idx_reg;
8331 /* For TLS variables, this will return the TLS offset */
8332 EMIT_NEW_SFLDACONST (cfg, ins, field);
8333 offset_reg = ins->dreg;
8334 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8335 idx_reg = alloc_ireg (cfg);
8336 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8339 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8340 array_reg = alloc_ireg (cfg);
8341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8342 offset2_reg = alloc_ireg (cfg);
8343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8344 dreg = alloc_ireg (cfg);
8345 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8347 offset = (gsize)addr & 0x7fffffff;
8348 idx = (offset >> 24) - 1;
8350 array_reg = alloc_ireg (cfg);
8351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8352 dreg = alloc_ireg (cfg);
8353 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8355 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8356 (cfg->compile_aot && is_special_static) ||
8357 (context_used && is_special_static)) {
8358 MonoInst *iargs [2];
8360 g_assert (field->parent);
8361 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8363 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8364 field, MONO_RGCTX_INFO_CLASS_FIELD);
8366 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8368 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8369 } else if (context_used) {
8370 MonoInst *static_data;
8373 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8374 method->klass->name_space, method->klass->name, method->name,
8375 depth, field->offset);
8378 if (mono_class_needs_cctor_run (klass, method))
8379 emit_generic_class_init (cfg, klass);
8382 * The pointer we're computing here is
8384 * super_info.static_data + field->offset
8386 static_data = emit_get_rgctx_klass (cfg, context_used,
8387 klass, MONO_RGCTX_INFO_STATIC_DATA);
8389 if (field->offset == 0) {
8392 int addr_reg = mono_alloc_preg (cfg);
8393 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8395 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8396 MonoInst *iargs [2];
8398 g_assert (field->parent);
8399 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8400 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8401 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8403 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8405 CHECK_TYPELOAD (klass);
8407 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8408 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8409 if (cfg->verbose_level > 2)
8410 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8411 class_inits = g_slist_prepend (class_inits, vtable);
8413 if (cfg->run_cctors) {
8415 /* This makes it so that inlining cannot trigger */
8416 /* .cctors: too many apps depend on them */
8417 /* running with a specific order... */
8418 if (! vtable->initialized)
8420 ex = mono_runtime_class_init_full (vtable, FALSE);
8422 set_exception_object (cfg, ex);
8423 goto exception_exit;
8427 addr = (char*)vtable->data + field->offset;
8429 if (cfg->compile_aot)
8430 EMIT_NEW_SFLDACONST (cfg, ins, field);
8432 EMIT_NEW_PCONST (cfg, ins, addr);
8434 MonoInst *iargs [1];
8435 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8436 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8440 /* Generate IR to do the actual load/store operation */
8442 if (*ip == CEE_LDSFLDA) {
8443 ins->klass = mono_class_from_mono_type (field->type);
8444 ins->type = STACK_PTR;
8446 } else if (*ip == CEE_STSFLD) {
8451 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8452 store->flags |= ins_flag;
8454 gboolean is_const = FALSE;
8455 MonoVTable *vtable = NULL;
8457 if (!context_used) {
8458 vtable = mono_class_vtable (cfg->domain, klass);
8459 CHECK_TYPELOAD (klass);
8461 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8462 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8463 gpointer addr = (char*)vtable->data + field->offset;
8464 int ro_type = field->type->type;
8465 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8466 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8468 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8471 case MONO_TYPE_BOOLEAN:
8473 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8477 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8480 case MONO_TYPE_CHAR:
8482 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8486 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8491 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8495 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8501 case MONO_TYPE_FNPTR:
8502 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8503 type_to_eval_stack_type ((cfg), field->type, *sp);
8506 case MONO_TYPE_STRING:
8507 case MONO_TYPE_OBJECT:
8508 case MONO_TYPE_CLASS:
8509 case MONO_TYPE_SZARRAY:
8510 case MONO_TYPE_ARRAY:
8511 if (!mono_gc_is_moving ()) {
8512 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8513 type_to_eval_stack_type ((cfg), field->type, *sp);
8521 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8526 case MONO_TYPE_VALUETYPE:
8536 CHECK_STACK_OVF (1);
8538 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8539 load->flags |= ins_flag;
8552 token = read32 (ip + 1);
8553 klass = mini_get_class (method, token, generic_context);
8554 CHECK_TYPELOAD (klass);
8555 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8556 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8557 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8558 generic_class_is_reference_type (cfg, klass)) {
8559 /* insert call to write barrier */
8560 emit_write_barrier (cfg, sp [0], sp [1], -1);
8572 const char *data_ptr;
8574 guint32 field_token;
8580 token = read32 (ip + 1);
8582 klass = mini_get_class (method, token, generic_context);
8583 CHECK_TYPELOAD (klass);
8585 if (cfg->generic_sharing_context)
8586 context_used = mono_class_check_context_used (klass);
8588 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8589 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8590 ins->sreg1 = sp [0]->dreg;
8591 ins->type = STACK_I4;
8592 ins->dreg = alloc_ireg (cfg);
8593 MONO_ADD_INS (cfg->cbb, ins);
8594 *sp = mono_decompose_opcode (cfg, ins);
8599 MonoClass *array_class = mono_array_class_get (klass, 1);
8600 /* FIXME: we cannot get a managed
8601 allocator because we can't get the
8602 open generic class's vtable. We
8603 have the same problem in
8604 handle_alloc(). This
8605 needs to be solved so that we can
8606 have managed allocs of shared
8609 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8610 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8612 MonoMethod *managed_alloc = NULL;
8614 /* FIXME: Decompose later to help abcrem */
8617 args [0] = emit_get_rgctx_klass (cfg, context_used,
8618 array_class, MONO_RGCTX_INFO_VTABLE);
8623 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8625 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8627 if (cfg->opt & MONO_OPT_SHARED) {
8628 /* Decompose now to avoid problems with references to the domainvar */
8629 MonoInst *iargs [3];
8631 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8632 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8635 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8637 /* Decompose later since it is needed by abcrem */
8638 MonoClass *array_type = mono_array_class_get (klass, 1);
8639 mono_class_vtable (cfg->domain, array_type);
8640 CHECK_TYPELOAD (array_type);
8642 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8643 ins->dreg = alloc_preg (cfg);
8644 ins->sreg1 = sp [0]->dreg;
8645 ins->inst_newa_class = klass;
8646 ins->type = STACK_OBJ;
8648 MONO_ADD_INS (cfg->cbb, ins);
8649 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8650 cfg->cbb->has_array_access = TRUE;
8652 /* Needed so mono_emit_load_get_addr () gets called */
8653 mono_get_got_var (cfg);
8663 * we inline/optimize the initialization sequence if possible.
8664 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8665 * for small sizes open code the memcpy
8666 * ensure the rva field is big enough
8668 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8669 MonoMethod *memcpy_method = get_memcpy_method ();
8670 MonoInst *iargs [3];
8671 int add_reg = alloc_preg (cfg);
8673 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8674 if (cfg->compile_aot) {
8675 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8677 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8679 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8680 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8689 if (sp [0]->type != STACK_OBJ)
8692 dreg = alloc_preg (cfg);
8693 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8694 ins->dreg = alloc_preg (cfg);
8695 ins->sreg1 = sp [0]->dreg;
8696 ins->type = STACK_I4;
8697 /* This flag will be inherited by the decomposition */
8698 ins->flags |= MONO_INST_FAULT;
8699 MONO_ADD_INS (cfg->cbb, ins);
8700 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8701 cfg->cbb->has_array_access = TRUE;
8709 if (sp [0]->type != STACK_OBJ)
8712 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8714 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8715 CHECK_TYPELOAD (klass);
8716 /* we need to make sure that this array is exactly the type it needs
8717 * to be for correctness. the wrappers are lax with their usage
8718 * so we need to ignore them here
8720 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8721 MonoClass *array_class = mono_array_class_get (klass, 1);
8722 mini_emit_check_array_type (cfg, sp [0], array_class);
8723 CHECK_TYPELOAD (array_class);
8727 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8742 case CEE_LDELEM_REF: {
8748 if (*ip == CEE_LDELEM) {
8750 token = read32 (ip + 1);
8751 klass = mini_get_class (method, token, generic_context);
8752 CHECK_TYPELOAD (klass);
8753 mono_class_init (klass);
8756 klass = array_access_to_klass (*ip);
8758 if (sp [0]->type != STACK_OBJ)
8761 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8763 if (sp [1]->opcode == OP_ICONST) {
8764 int array_reg = sp [0]->dreg;
8765 int index_reg = sp [1]->dreg;
8766 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8768 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8771 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8772 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8775 if (*ip == CEE_LDELEM)
8788 case CEE_STELEM_REF:
8795 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8797 if (*ip == CEE_STELEM) {
8799 token = read32 (ip + 1);
8800 klass = mini_get_class (method, token, generic_context);
8801 CHECK_TYPELOAD (klass);
8802 mono_class_init (klass);
8805 klass = array_access_to_klass (*ip);
8807 if (sp [0]->type != STACK_OBJ)
8810 /* storing a NULL doesn't need any of the complex checks in stelemref */
8811 if (generic_class_is_reference_type (cfg, klass) &&
8812 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8813 MonoMethod* helper = mono_marshal_get_stelemref ();
8814 MonoInst *iargs [3];
8816 if (sp [0]->type != STACK_OBJ)
8818 if (sp [2]->type != STACK_OBJ)
8825 mono_emit_method_call (cfg, helper, iargs, NULL);
8827 if (sp [1]->opcode == OP_ICONST) {
8828 int array_reg = sp [0]->dreg;
8829 int index_reg = sp [1]->dreg;
8830 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8832 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8835 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8836 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8840 if (*ip == CEE_STELEM)
8847 case CEE_CKFINITE: {
8851 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8852 ins->sreg1 = sp [0]->dreg;
8853 ins->dreg = alloc_freg (cfg);
8854 ins->type = STACK_R8;
8855 MONO_ADD_INS (bblock, ins);
8857 *sp++ = mono_decompose_opcode (cfg, ins);
8862 case CEE_REFANYVAL: {
8863 MonoInst *src_var, *src;
8865 int klass_reg = alloc_preg (cfg);
8866 int dreg = alloc_preg (cfg);
8869 MONO_INST_NEW (cfg, ins, *ip);
8872 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8873 CHECK_TYPELOAD (klass);
8874 mono_class_init (klass);
8876 if (cfg->generic_sharing_context)
8877 context_used = mono_class_check_context_used (klass);
8880 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8882 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8883 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8884 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8887 MonoInst *klass_ins;
8889 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8890 klass, MONO_RGCTX_INFO_KLASS);
8893 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8894 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8896 mini_emit_class_check (cfg, klass_reg, klass);
8898 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8899 ins->type = STACK_MP;
8904 case CEE_MKREFANY: {
8905 MonoInst *loc, *addr;
8908 MONO_INST_NEW (cfg, ins, *ip);
8911 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8912 CHECK_TYPELOAD (klass);
8913 mono_class_init (klass);
8915 if (cfg->generic_sharing_context)
8916 context_used = mono_class_check_context_used (klass);
8918 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8919 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8922 MonoInst *const_ins;
8923 int type_reg = alloc_preg (cfg);
8925 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8926 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8929 } else if (cfg->compile_aot) {
8930 int const_reg = alloc_preg (cfg);
8931 int type_reg = alloc_preg (cfg);
8933 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8934 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8938 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8939 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8943 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8944 ins->type = STACK_VTYPE;
8945 ins->klass = mono_defaults.typed_reference_class;
8952 MonoClass *handle_class;
8954 CHECK_STACK_OVF (1);
8957 n = read32 (ip + 1);
8959 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8960 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8961 handle = mono_method_get_wrapper_data (method, n);
8962 handle_class = mono_method_get_wrapper_data (method, n + 1);
8963 if (handle_class == mono_defaults.typehandle_class)
8964 handle = &((MonoClass*)handle)->byval_arg;
8967 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8971 mono_class_init (handle_class);
8972 if (cfg->generic_sharing_context) {
8973 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8974 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8975 /* This case handles ldtoken
8976 of an open type, like for
8979 } else if (handle_class == mono_defaults.typehandle_class) {
8980 /* If we get a MONO_TYPE_CLASS
8981 then we need to provide the
8983 instantiation of it. */
8984 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8987 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8988 } else if (handle_class == mono_defaults.fieldhandle_class)
8989 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8990 else if (handle_class == mono_defaults.methodhandle_class)
8991 context_used = mono_method_check_context_used (handle);
8993 g_assert_not_reached ();
8996 if ((cfg->opt & MONO_OPT_SHARED) &&
8997 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8998 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8999 MonoInst *addr, *vtvar, *iargs [3];
9000 int method_context_used;
9002 if (cfg->generic_sharing_context)
9003 method_context_used = mono_method_check_context_used (method);
9005 method_context_used = 0;
9007 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9009 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9010 EMIT_NEW_ICONST (cfg, iargs [1], n);
9011 if (method_context_used) {
9012 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9013 method, MONO_RGCTX_INFO_METHOD);
9014 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9016 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9017 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9019 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9021 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9023 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9025 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9026 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9027 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9028 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9029 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9030 MonoClass *tclass = mono_class_from_mono_type (handle);
9032 mono_class_init (tclass);
9034 ins = emit_get_rgctx_klass (cfg, context_used,
9035 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9036 } else if (cfg->compile_aot) {
9037 if (method->wrapper_type) {
9038 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9039 /* Special case for static synchronized wrappers */
9040 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9042 /* FIXME: n is not a normal token */
9043 cfg->disable_aot = TRUE;
9044 EMIT_NEW_PCONST (cfg, ins, NULL);
9047 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9050 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9052 ins->type = STACK_OBJ;
9053 ins->klass = cmethod->klass;
9056 MonoInst *addr, *vtvar;
9058 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9061 if (handle_class == mono_defaults.typehandle_class) {
9062 ins = emit_get_rgctx_klass (cfg, context_used,
9063 mono_class_from_mono_type (handle),
9064 MONO_RGCTX_INFO_TYPE);
9065 } else if (handle_class == mono_defaults.methodhandle_class) {
9066 ins = emit_get_rgctx_method (cfg, context_used,
9067 handle, MONO_RGCTX_INFO_METHOD);
9068 } else if (handle_class == mono_defaults.fieldhandle_class) {
9069 ins = emit_get_rgctx_field (cfg, context_used,
9070 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9072 g_assert_not_reached ();
9074 } else if (cfg->compile_aot) {
9075 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9077 EMIT_NEW_PCONST (cfg, ins, handle);
9079 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9081 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9091 MONO_INST_NEW (cfg, ins, OP_THROW);
9093 ins->sreg1 = sp [0]->dreg;
9095 bblock->out_of_line = TRUE;
9096 MONO_ADD_INS (bblock, ins);
9097 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9098 MONO_ADD_INS (bblock, ins);
9101 link_bblock (cfg, bblock, end_bblock);
9102 start_new_bblock = 1;
9104 case CEE_ENDFINALLY:
9105 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9106 MONO_ADD_INS (bblock, ins);
9108 start_new_bblock = 1;
9111 * Control will leave the method so empty the stack, otherwise
9112 * the next basic block will start with a nonempty stack.
9114 while (sp != stack_start) {
9122 if (*ip == CEE_LEAVE) {
9124 target = ip + 5 + (gint32)read32(ip + 1);
9127 target = ip + 2 + (signed char)(ip [1]);
9130 /* empty the stack */
9131 while (sp != stack_start) {
9136 * If this leave statement is in a catch block, check for a
9137 * pending exception, and rethrow it if necessary.
9138 * We avoid doing this in runtime invoke wrappers, since those are called
8139 * by native code which expects the wrapper to catch all exceptions.
9141 for (i = 0; i < header->num_clauses; ++i) {
9142 MonoExceptionClause *clause = &header->clauses [i];
9145 * Use <= in the final comparison to handle clauses with multiple
9146 * leave statements, like in bug #78024.
9147 * The ordering of the exception clauses guarantees that we find the
9150 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9152 MonoBasicBlock *dont_throw;
9157 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9160 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9162 NEW_BBLOCK (cfg, dont_throw);
9165 * Currently, we always rethrow the abort exception, despite the
9166 * fact that this is not correct. See thread6.cs for an example.
9167 * But propagating the abort exception is more important than
9168 * getting the semantics right.
9170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9171 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9172 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9174 MONO_START_BB (cfg, dont_throw);
9179 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9181 MonoExceptionClause *clause;
9183 for (tmp = handlers; tmp; tmp = tmp->next) {
9185 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9187 link_bblock (cfg, bblock, tblock);
9188 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9189 ins->inst_target_bb = tblock;
9190 ins->inst_eh_block = clause;
9191 MONO_ADD_INS (bblock, ins);
9192 bblock->has_call_handler = 1;
9193 if (COMPILE_LLVM (cfg)) {
9194 MonoBasicBlock *target_bb;
9197 * Link the finally bblock with the target, since it will
9198 * conceptually branch there.
9199 * FIXME: Have to link the bblock containing the endfinally.
9201 GET_BBLOCK (cfg, target_bb, target);
9202 link_bblock (cfg, tblock, target_bb);
9205 g_list_free (handlers);
9208 MONO_INST_NEW (cfg, ins, OP_BR);
9209 MONO_ADD_INS (bblock, ins);
9210 GET_BBLOCK (cfg, tblock, target);
9211 link_bblock (cfg, bblock, tblock);
9212 ins->inst_target_bb = tblock;
9213 start_new_bblock = 1;
9215 if (*ip == CEE_LEAVE)
9224 * Mono specific opcodes
9226 case MONO_CUSTOM_PREFIX: {
9228 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9232 case CEE_MONO_ICALL: {
9234 MonoJitICallInfo *info;
9236 token = read32 (ip + 2);
9237 func = mono_method_get_wrapper_data (method, token);
9238 info = mono_find_jit_icall_by_addr (func);
9241 CHECK_STACK (info->sig->param_count);
9242 sp -= info->sig->param_count;
9244 ins = mono_emit_jit_icall (cfg, info->func, sp);
9245 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9249 inline_costs += 10 * num_calls++;
9253 case CEE_MONO_LDPTR: {
9256 CHECK_STACK_OVF (1);
9258 token = read32 (ip + 2);
9260 ptr = mono_method_get_wrapper_data (method, token);
9261 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9262 MonoJitICallInfo *callinfo;
9263 const char *icall_name;
9265 icall_name = method->name + strlen ("__icall_wrapper_");
9266 g_assert (icall_name);
9267 callinfo = mono_find_jit_icall_by_name (icall_name);
9268 g_assert (callinfo);
9270 if (ptr == callinfo->func) {
9271 /* Will be transformed into an AOTCONST later */
9272 EMIT_NEW_PCONST (cfg, ins, ptr);
9278 /* FIXME: Generalize this */
9279 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9280 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9285 EMIT_NEW_PCONST (cfg, ins, ptr);
9288 inline_costs += 10 * num_calls++;
9289 /* Can't embed random pointers into AOT code */
9290 cfg->disable_aot = 1;
9293 case CEE_MONO_ICALL_ADDR: {
9294 MonoMethod *cmethod;
9297 CHECK_STACK_OVF (1);
9299 token = read32 (ip + 2);
9301 cmethod = mono_method_get_wrapper_data (method, token);
9303 if (cfg->compile_aot) {
9304 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9306 ptr = mono_lookup_internal_call (cmethod);
9308 EMIT_NEW_PCONST (cfg, ins, ptr);
9314 case CEE_MONO_VTADDR: {
9315 MonoInst *src_var, *src;
9321 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9322 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9327 case CEE_MONO_NEWOBJ: {
9328 MonoInst *iargs [2];
9330 CHECK_STACK_OVF (1);
9332 token = read32 (ip + 2);
9333 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9334 mono_class_init (klass);
9335 NEW_DOMAINCONST (cfg, iargs [0]);
9336 MONO_ADD_INS (cfg->cbb, iargs [0]);
9337 NEW_CLASSCONST (cfg, iargs [1], klass);
9338 MONO_ADD_INS (cfg->cbb, iargs [1]);
9339 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9341 inline_costs += 10 * num_calls++;
9344 case CEE_MONO_OBJADDR:
9347 MONO_INST_NEW (cfg, ins, OP_MOVE);
9348 ins->dreg = alloc_preg (cfg);
9349 ins->sreg1 = sp [0]->dreg;
9350 ins->type = STACK_MP;
9351 MONO_ADD_INS (cfg->cbb, ins);
9355 case CEE_MONO_LDNATIVEOBJ:
9357 * Similar to LDOBJ, but instead load the unmanaged
9358 * representation of the vtype to the stack.
9363 token = read32 (ip + 2);
9364 klass = mono_method_get_wrapper_data (method, token);
9365 g_assert (klass->valuetype);
9366 mono_class_init (klass);
9369 MonoInst *src, *dest, *temp;
9372 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9373 temp->backend.is_pinvoke = 1;
9374 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9375 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9377 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9378 dest->type = STACK_VTYPE;
9379 dest->klass = klass;
9385 case CEE_MONO_RETOBJ: {
9387 * Same as RET, but return the native representation of a vtype
9390 g_assert (cfg->ret);
9391 g_assert (mono_method_signature (method)->pinvoke);
9396 token = read32 (ip + 2);
9397 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9399 if (!cfg->vret_addr) {
9400 g_assert (cfg->ret_var_is_local);
9402 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9404 EMIT_NEW_RETLOADA (cfg, ins);
9406 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9408 if (sp != stack_start)
9411 MONO_INST_NEW (cfg, ins, OP_BR);
9412 ins->inst_target_bb = end_bblock;
9413 MONO_ADD_INS (bblock, ins);
9414 link_bblock (cfg, bblock, end_bblock);
9415 start_new_bblock = 1;
9419 case CEE_MONO_CISINST:
9420 case CEE_MONO_CCASTCLASS: {
9425 token = read32 (ip + 2);
9426 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9427 if (ip [1] == CEE_MONO_CISINST)
9428 ins = handle_cisinst (cfg, klass, sp [0]);
9430 ins = handle_ccastclass (cfg, klass, sp [0]);
9436 case CEE_MONO_SAVE_LMF:
9437 case CEE_MONO_RESTORE_LMF:
9438 #ifdef MONO_ARCH_HAVE_LMF_OPS
9439 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9440 MONO_ADD_INS (bblock, ins);
9441 cfg->need_lmf_area = TRUE;
9445 case CEE_MONO_CLASSCONST:
9446 CHECK_STACK_OVF (1);
9448 token = read32 (ip + 2);
9449 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9452 inline_costs += 10 * num_calls++;
9454 case CEE_MONO_NOT_TAKEN:
9455 bblock->out_of_line = TRUE;
9459 CHECK_STACK_OVF (1);
9461 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9462 ins->dreg = alloc_preg (cfg);
9463 ins->inst_offset = (gint32)read32 (ip + 2);
9464 ins->type = STACK_PTR;
9465 MONO_ADD_INS (bblock, ins);
9469 case CEE_MONO_DYN_CALL: {
9472 /* It would be easier to call a trampoline, but that would put an
9473 * extra frame on the stack, confusing exception handling. So
9474 * implement it inline using an opcode for now.
9477 if (!cfg->dyn_call_var) {
9478 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9479 /* prevent it from being register allocated */
9480 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9483 /* Has to use a call inst since the local regalloc expects it */
9484 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9485 ins = (MonoInst*)call;
9487 ins->sreg1 = sp [0]->dreg;
9488 ins->sreg2 = sp [1]->dreg;
9489 MONO_ADD_INS (bblock, ins);
9491 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9492 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9496 inline_costs += 10 * num_calls++;
9501 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9511 /* somewhat similar to LDTOKEN */
9512 MonoInst *addr, *vtvar;
9513 CHECK_STACK_OVF (1);
9514 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9516 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9517 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9519 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9520 ins->type = STACK_VTYPE;
9521 ins->klass = mono_defaults.argumenthandle_class;
9534 * The following transforms:
9535 * CEE_CEQ into OP_CEQ
9536 * CEE_CGT into OP_CGT
9537 * CEE_CGT_UN into OP_CGT_UN
9538 * CEE_CLT into OP_CLT
9539 * CEE_CLT_UN into OP_CLT_UN
9541 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9543 MONO_INST_NEW (cfg, ins, cmp->opcode);
9545 cmp->sreg1 = sp [0]->dreg;
9546 cmp->sreg2 = sp [1]->dreg;
9547 type_from_op (cmp, sp [0], sp [1]);
9549 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9550 cmp->opcode = OP_LCOMPARE;
9551 else if (sp [0]->type == STACK_R8)
9552 cmp->opcode = OP_FCOMPARE;
9554 cmp->opcode = OP_ICOMPARE;
9555 MONO_ADD_INS (bblock, cmp);
9556 ins->type = STACK_I4;
9557 ins->dreg = alloc_dreg (cfg, ins->type);
9558 type_from_op (ins, sp [0], sp [1]);
9560 if (cmp->opcode == OP_FCOMPARE) {
9562 * The backends expect the fceq opcodes to do the
9565 cmp->opcode = OP_NOP;
9566 ins->sreg1 = cmp->sreg1;
9567 ins->sreg2 = cmp->sreg2;
9569 MONO_ADD_INS (bblock, ins);
9576 MonoMethod *cil_method;
9577 gboolean needs_static_rgctx_invoke;
9579 CHECK_STACK_OVF (1);
9581 n = read32 (ip + 2);
9582 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9583 if (!cmethod || mono_loader_get_last_error ())
9585 mono_class_init (cmethod->klass);
9587 mono_save_token_info (cfg, image, n, cmethod);
9589 if (cfg->generic_sharing_context)
9590 context_used = mono_method_check_context_used (cmethod);
9592 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9594 cil_method = cmethod;
9595 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9596 METHOD_ACCESS_FAILURE;
9598 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9599 if (check_linkdemand (cfg, method, cmethod))
9601 CHECK_CFG_EXCEPTION;
9602 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9603 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9607 * Optimize the common case of ldftn+delegate creation
9609 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9610 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9611 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9613 int invoke_context_used = 0;
9615 invoke = mono_get_delegate_invoke (ctor_method->klass);
9616 if (!invoke || !mono_method_signature (invoke))
9619 if (cfg->generic_sharing_context)
9620 invoke_context_used = mono_method_check_context_used (invoke);
9622 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9623 /* FIXME: SGEN support */
9624 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9625 MonoInst *target_ins;
9628 if (cfg->verbose_level > 3)
9629 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9630 target_ins = sp [-1];
9632 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9633 CHECK_CFG_EXCEPTION;
9642 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9643 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9647 inline_costs += 10 * num_calls++;
9650 case CEE_LDVIRTFTN: {
9655 n = read32 (ip + 2);
9656 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9657 if (!cmethod || mono_loader_get_last_error ())
9659 mono_class_init (cmethod->klass);
9661 if (cfg->generic_sharing_context)
9662 context_used = mono_method_check_context_used (cmethod);
9664 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9665 if (check_linkdemand (cfg, method, cmethod))
9667 CHECK_CFG_EXCEPTION;
9668 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9669 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9675 args [1] = emit_get_rgctx_method (cfg, context_used,
9676 cmethod, MONO_RGCTX_INFO_METHOD);
9679 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9681 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9684 inline_costs += 10 * num_calls++;
9688 CHECK_STACK_OVF (1);
9690 n = read16 (ip + 2);
9692 EMIT_NEW_ARGLOAD (cfg, ins, n);
9697 CHECK_STACK_OVF (1);
9699 n = read16 (ip + 2);
9701 NEW_ARGLOADA (cfg, ins, n);
9702 MONO_ADD_INS (cfg->cbb, ins);
9710 n = read16 (ip + 2);
9712 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9714 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9718 CHECK_STACK_OVF (1);
9720 n = read16 (ip + 2);
9722 EMIT_NEW_LOCLOAD (cfg, ins, n);
9727 unsigned char *tmp_ip;
9728 CHECK_STACK_OVF (1);
9730 n = read16 (ip + 2);
9733 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9739 EMIT_NEW_LOCLOADA (cfg, ins, n);
9748 n = read16 (ip + 2);
9750 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9752 emit_stloc_ir (cfg, sp, header, n);
9759 if (sp != stack_start)
9761 if (cfg->method != method)
9763 * Inlining this into a loop in a parent could lead to
9764 * stack overflows which is different behavior than the
9765 * non-inlined case, thus disable inlining in this case.
9767 goto inline_failure;
9769 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9770 ins->dreg = alloc_preg (cfg);
9771 ins->sreg1 = sp [0]->dreg;
9772 ins->type = STACK_PTR;
9773 MONO_ADD_INS (cfg->cbb, ins);
9775 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9777 ins->flags |= MONO_INST_INIT;
9782 case CEE_ENDFILTER: {
9783 MonoExceptionClause *clause, *nearest;
9784 int cc, nearest_num;
9788 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9790 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9791 ins->sreg1 = (*sp)->dreg;
9792 MONO_ADD_INS (bblock, ins);
9793 start_new_bblock = 1;
9798 for (cc = 0; cc < header->num_clauses; ++cc) {
9799 clause = &header->clauses [cc];
9800 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9801 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9802 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9808 if ((ip - header->code) != nearest->handler_offset)
9813 case CEE_UNALIGNED_:
9814 ins_flag |= MONO_INST_UNALIGNED;
9815 /* FIXME: record alignment? we can assume 1 for now */
9820 ins_flag |= MONO_INST_VOLATILE;
9824 ins_flag |= MONO_INST_TAILCALL;
9825 cfg->flags |= MONO_CFG_HAS_TAIL;
9826 /* Can't inline tail calls at this time */
9827 inline_costs += 100000;
9834 token = read32 (ip + 2);
9835 klass = mini_get_class (method, token, generic_context);
9836 CHECK_TYPELOAD (klass);
9837 if (generic_class_is_reference_type (cfg, klass))
9838 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9840 mini_emit_initobj (cfg, *sp, NULL, klass);
9844 case CEE_CONSTRAINED_:
9846 token = read32 (ip + 2);
9847 if (method->wrapper_type != MONO_WRAPPER_NONE)
9848 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9850 constrained_call = mono_class_get_full (image, token, generic_context);
9851 CHECK_TYPELOAD (constrained_call);
9856 MonoInst *iargs [3];
9860 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9861 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9862 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9863 /* emit_memset only works when val == 0 */
9864 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9869 if (ip [1] == CEE_CPBLK) {
9870 MonoMethod *memcpy_method = get_memcpy_method ();
9871 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9873 MonoMethod *memset_method = get_memset_method ();
9874 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9884 ins_flag |= MONO_INST_NOTYPECHECK;
9886 ins_flag |= MONO_INST_NORANGECHECK;
9887 /* we ignore the no-nullcheck for now since we
9888 * really do it explicitly only when doing callvirt->call
9894 int handler_offset = -1;
9896 for (i = 0; i < header->num_clauses; ++i) {
9897 MonoExceptionClause *clause = &header->clauses [i];
9898 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9899 handler_offset = clause->handler_offset;
9904 bblock->flags |= BB_EXCEPTION_UNSAFE;
9906 g_assert (handler_offset != -1);
9908 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9909 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9910 ins->sreg1 = load->dreg;
9911 MONO_ADD_INS (bblock, ins);
9913 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9914 MONO_ADD_INS (bblock, ins);
9917 link_bblock (cfg, bblock, end_bblock);
9918 start_new_bblock = 1;
9926 CHECK_STACK_OVF (1);
9928 token = read32 (ip + 2);
9929 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9930 MonoType *type = mono_type_create_from_typespec (image, token);
9931 token = mono_type_size (type, &ialign);
9933 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9934 CHECK_TYPELOAD (klass);
9935 mono_class_init (klass);
9936 token = mono_class_value_size (klass, &align);
9938 EMIT_NEW_ICONST (cfg, ins, token);
9943 case CEE_REFANYTYPE: {
9944 MonoInst *src_var, *src;
9950 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9952 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9953 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9972 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9982 g_warning ("opcode 0x%02x not handled", *ip);
9986 if (start_new_bblock != 1)
9989 bblock->cil_length = ip - bblock->cil_code;
9990 bblock->next_bb = end_bblock;
9992 if (cfg->method == method && cfg->domainvar) {
9994 MonoInst *get_domain;
9996 cfg->cbb = init_localsbb;
9998 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9999 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10002 get_domain->dreg = alloc_preg (cfg);
10003 MONO_ADD_INS (cfg->cbb, get_domain);
10005 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10006 MONO_ADD_INS (cfg->cbb, store);
10009 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10010 if (cfg->compile_aot)
10011 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10012 mono_get_got_var (cfg);
10015 if (cfg->method == method && cfg->got_var)
10016 mono_emit_load_got_addr (cfg);
10021 cfg->cbb = init_localsbb;
10023 for (i = 0; i < header->num_locals; ++i) {
10024 MonoType *ptype = header->locals [i];
10025 int t = ptype->type;
10026 dreg = cfg->locals [i]->dreg;
10028 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10029 t = mono_class_enum_basetype (ptype->data.klass)->type;
10030 if (ptype->byref) {
10031 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10032 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10033 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10034 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10035 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10036 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10037 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10038 ins->type = STACK_R8;
10039 ins->inst_p0 = (void*)&r8_0;
10040 ins->dreg = alloc_dreg (cfg, STACK_R8);
10041 MONO_ADD_INS (init_localsbb, ins);
10042 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10043 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10044 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10045 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10047 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10052 if (cfg->init_ref_vars && cfg->method == method) {
10053 /* Emit initialization for ref vars */
10054 // FIXME: Avoid duplication initialization for IL locals.
10055 for (i = 0; i < cfg->num_varinfo; ++i) {
10056 MonoInst *ins = cfg->varinfo [i];
10058 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10059 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10063 /* Add a sequence point for method entry/exit events */
10065 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10066 MONO_ADD_INS (init_localsbb, ins);
10067 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10068 MONO_ADD_INS (cfg->bb_exit, ins);
10073 if (cfg->method == method) {
10074 MonoBasicBlock *bb;
10075 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10076 bb->region = mono_find_block_region (cfg, bb->real_offset);
10078 mono_create_spvar_for_region (cfg, bb->region);
10079 if (cfg->verbose_level > 2)
10080 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10084 g_slist_free (class_inits);
10085 dont_inline = g_list_remove (dont_inline, method);
10087 if (inline_costs < 0) {
10090 /* Method is too large */
10091 mname = mono_method_full_name (method, TRUE);
10092 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10093 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10095 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10096 mono_basic_block_free (original_bb);
10100 if ((cfg->verbose_level > 2) && (cfg->method == method))
10101 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10103 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10104 mono_basic_block_free (original_bb);
10105 return inline_costs;
10108 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10115 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10119 set_exception_type_from_invalid_il (cfg, method, ip);
10123 g_slist_free (class_inits);
10124 mono_basic_block_free (original_bb);
10125 dont_inline = g_list_remove (dont_inline, method);
10126 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STOREx_MEMBASE_REG opcode (store a register to [basereg+offset])
 * to its _IMM counterpart (store an immediate), used when the value being
 * stored is known to be constant.  Opcodes with no immediate form hit the
 * assertion below.
 * NOTE(review): this chunk is a line-subsampled extraction — the switch
 * header, default label and closing braces are not visible here.
 */
10131 store_membase_reg_to_store_membase_imm (int opcode)
10134 	case OP_STORE_MEMBASE_REG:
10135 	return OP_STORE_MEMBASE_IMM;
10136 	case OP_STOREI1_MEMBASE_REG:
10137 	return OP_STOREI1_MEMBASE_IMM;
10138 	case OP_STOREI2_MEMBASE_REG:
10139 	return OP_STOREI2_MEMBASE_IMM;
10140 	case OP_STOREI4_MEMBASE_REG:
10141 	return OP_STOREI4_MEMBASE_IMM;
10142 	case OP_STOREI8_MEMBASE_REG:
10143 	return OP_STOREI8_MEMBASE_IMM;
	/* No immediate form exists for this store opcode */
10145 	g_assert_not_reached ();
10151 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to the variant taking an immediate second
 * operand (OP_IADD -> OP_IADD_IMM etc.), used by the constant-folding /
 * immediate-propagation passes.  Sections below: 32-bit integer ALU,
 * 64-bit ALU, compares, memory stores, and x86/amd64-specific opcodes.
 * NOTE(review): subsampled extraction — most case labels and the default
 * branch (presumably returning -1 for "no immediate form") are not
 * visible here; the return values imply the conventional pairings.
 */
10154 mono_op_to_op_imm (int opcode)
	/* 32 bit integer ALU ops */
10158 	return OP_IADD_IMM;
10160 	return OP_ISUB_IMM;
10162 	return OP_IDIV_IMM;
10164 	return OP_IDIV_UN_IMM;
10166 	return OP_IREM_IMM;
10168 	return OP_IREM_UN_IMM;
10170 	return OP_IMUL_IMM;
10172 	return OP_IAND_IMM;
10176 	return OP_IXOR_IMM;
10178 	return OP_ISHL_IMM;
10180 	return OP_ISHR_IMM;
10182 	return OP_ISHR_UN_IMM;
	/* 64 bit ALU ops */
10185 	return OP_LADD_IMM;
10187 	return OP_LSUB_IMM;
10189 	return OP_LAND_IMM;
10193 	return OP_LXOR_IMM;
10195 	return OP_LSHL_IMM;
10197 	return OP_LSHR_IMM;
10199 	return OP_LSHR_UN_IMM;
	/* Compares */
10202 	return OP_COMPARE_IMM;
10204 	return OP_ICOMPARE_IMM;
10206 	return OP_LCOMPARE_IMM;
	/* Stores: fold a constant stored value into the store itself */
10208 	case OP_STORE_MEMBASE_REG:
10209 	return OP_STORE_MEMBASE_IMM;
10210 	case OP_STOREI1_MEMBASE_REG:
10211 	return OP_STOREI1_MEMBASE_IMM;
10212 	case OP_STOREI2_MEMBASE_REG:
10213 	return OP_STOREI2_MEMBASE_IMM;
10214 	case OP_STOREI4_MEMBASE_REG:
10215 	return OP_STOREI4_MEMBASE_IMM;
	/* Architecture-specific opcodes (x86/amd64 only) */
10217 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10219 	return OP_X86_PUSH_IMM;
10220 	case OP_X86_COMPARE_MEMBASE_REG:
10221 	return OP_X86_COMPARE_MEMBASE_IMM;
10223 #if defined(TARGET_AMD64)
10224 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
10225 	return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10227 	case OP_VOIDCALL_REG:
10228 	return OP_VOIDCALL;
10236 	return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the IR load-from-
 * [basereg+offset] opcode of the matching size/signedness.  Both
 * CEE_LDIND_I (native int) and CEE_LDIND_REF map to the pointer-sized
 * OP_LOAD_MEMBASE.  Unknown opcodes hit the assertion.
 * NOTE(review): subsampled extraction — most case labels are not visible.
 */
10243 ldind_to_load_membase (int opcode)
10247 	return OP_LOADI1_MEMBASE;
10249 	return OP_LOADU1_MEMBASE;
10251 	return OP_LOADI2_MEMBASE;
10253 	return OP_LOADU2_MEMBASE;
10255 	return OP_LOADI4_MEMBASE;
10257 	return OP_LOADU4_MEMBASE;
10259 	return OP_LOAD_MEMBASE;
10260 	case CEE_LDIND_REF:
10261 	return OP_LOAD_MEMBASE;
10263 	return OP_LOADI8_MEMBASE;
10265 	return OP_LOADR4_MEMBASE;
10267 	return OP_LOADR8_MEMBASE;
10269 	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the IR store-to-
 * [basereg+offset] opcode of the matching size.  CEE_STIND_REF maps to
 * the pointer-sized OP_STORE_MEMBASE_REG.  Unknown opcodes hit the
 * assertion.
 * NOTE(review): subsampled extraction — most case labels are not visible.
 */
10276 stind_to_store_membase (int opcode)
10280 	return OP_STOREI1_MEMBASE_REG;
10282 	return OP_STOREI2_MEMBASE_REG;
10284 	return OP_STOREI4_MEMBASE_REG;
10286 	case CEE_STIND_REF:
10287 	return OP_STORE_MEMBASE_REG;
10289 	return OP_STOREI8_MEMBASE_REG;
10291 	return OP_STORER4_MEMBASE_REG;
10293 	return OP_STORER8_MEMBASE_REG;
10295 	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-[basereg+offset] opcode to the corresponding
 * load-from-absolute-address (_MEM) opcode, which only x86/amd64
 * support (see the FIXME about a MONO_ARCH_HAVE_LOAD_MEM macro).
 * OP_LOADI8_MEM is only available on 64-bit registers.
 * NOTE(review): subsampled extraction — the fallthrough return for
 * unsupported opcodes/targets is not visible here.
 */
10302 mono_load_membase_to_load_mem (int opcode)
10304 	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10305 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10307 	case OP_LOAD_MEMBASE:
10308 	return OP_LOAD_MEM;
10309 	case OP_LOADU1_MEMBASE:
10310 	return OP_LOADU1_MEM;
10311 	case OP_LOADU2_MEMBASE:
10312 	return OP_LOADU2_MEM;
10313 	case OP_LOADI4_MEMBASE:
10314 	return OP_LOADI4_MEM;
10315 	case OP_LOADU4_MEMBASE:
10316 	return OP_LOADU4_MEM;
10317 #if SIZEOF_REGISTER == 8
10318 	case OP_LOADI8_MEMBASE:
10319 	return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is stored back to memory with
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode that
 * performs the operation directly on the memory destination
 * (e.g. 'add [mem], reg'), avoiding the separate load/op/store.
 * The store must be of full register width (and, on amd64, the
 * matching 4/8-byte width) for the folding to be legal.
 * NOTE(review): subsampled extraction — case labels and the final
 * "no match" return are not visible; the returns imply the usual
 * OP_IADD/OP_ISUB/... -> *_MEMBASE_REG/IMM pairings.
 */
10328 op_to_op_dest_membase (int store_opcode, int opcode)
10330 #if defined(TARGET_X86)
	/* Only fold into full-width (32 bit) stores */
10331 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10336 	return OP_X86_ADD_MEMBASE_REG;
10338 	return OP_X86_SUB_MEMBASE_REG;
10340 	return OP_X86_AND_MEMBASE_REG;
10342 	return OP_X86_OR_MEMBASE_REG;
10344 	return OP_X86_XOR_MEMBASE_REG;
10347 	return OP_X86_ADD_MEMBASE_IMM;
10350 	return OP_X86_SUB_MEMBASE_IMM;
10353 	return OP_X86_AND_MEMBASE_IMM;
10356 	return OP_X86_OR_MEMBASE_IMM;
10359 	return OP_X86_XOR_MEMBASE_IMM;
10365 #if defined(TARGET_AMD64)
	/* amd64 additionally allows folding into 8 byte stores */
10366 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10371 	return OP_X86_ADD_MEMBASE_REG;
10373 	return OP_X86_SUB_MEMBASE_REG;
10375 	return OP_X86_AND_MEMBASE_REG;
10377 	return OP_X86_OR_MEMBASE_REG;
10379 	return OP_X86_XOR_MEMBASE_REG;
10381 	return OP_X86_ADD_MEMBASE_IMM;
10383 	return OP_X86_SUB_MEMBASE_IMM;
10385 	return OP_X86_AND_MEMBASE_IMM;
10387 	return OP_X86_OR_MEMBASE_IMM;
10389 	return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit variants */
10391 	return OP_AMD64_ADD_MEMBASE_REG;
10393 	return OP_AMD64_SUB_MEMBASE_REG;
10395 	return OP_AMD64_AND_MEMBASE_REG;
10397 	return OP_AMD64_OR_MEMBASE_REG;
10399 	return OP_AMD64_XOR_MEMBASE_REG;
10402 	return OP_AMD64_ADD_MEMBASE_IMM;
10405 	return OP_AMD64_SUB_MEMBASE_IMM;
10408 	return OP_AMD64_AND_MEMBASE_IMM;
10411 	return OP_AMD64_OR_MEMBASE_IMM;
10414 	return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a 1 byte store into a single
 * x86/amd64 SETcc-to-memory opcode.  Only valid when the store is an
 * OP_STOREI1_MEMBASE_REG (SETcc writes exactly one byte).
 * NOTE(review): subsampled extraction — the switch header, the case
 * labels (presumably OP_ICEQ / OP_ICNEQ or similar) and the "no match"
 * return are not visible here.
 */
10424 op_to_op_store_membase (int store_opcode, int opcode)
10426 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10429 	if (store_opcode == OP_STOREI1_MEMBASE_REG)
10430 	return OP_X86_SETEQ_MEMBASE;
10432 	if (store_opcode == OP_STOREI1_MEMBASE_REG)
10433 	return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source operand of OPCODE into a
 * memory-operand form of that opcode (e.g. 'cmp [mem], imm'), so the
 * separate load instruction can be eliminated.  LOAD_OPCODE tells the
 * width of the feeding load; only pointer/4-byte (x86) or additionally
 * 8-byte (amd64) loads may be folded.  Note the x86 byte-compare
 * special case and the FIXMEs about sign extension / 32 bit immediates
 * documenting why some foldings stay disabled.
 * NOTE(review): subsampled extraction — case labels and "no match"
 * returns are not visible here.
 */
10441 op_to_op_src1_membase (int load_opcode, int opcode)
10444 	/* FIXME: This has sign extension issues */
10446 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10447 	return OP_X86_COMPARE_MEMBASE8_IMM;
10450 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10455 	return OP_X86_PUSH_MEMBASE;
10456 	case OP_COMPARE_IMM:
10457 	case OP_ICOMPARE_IMM:
10458 	return OP_X86_COMPARE_MEMBASE_IMM;
10461 	return OP_X86_COMPARE_MEMBASE_REG;
10465 #ifdef TARGET_AMD64
10466 	/* FIXME: This has sign extension issues */
10468 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10469 	return OP_X86_COMPARE_MEMBASE8_IMM;
10474 	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10475 	return OP_X86_PUSH_MEMBASE;
	/* The immediate-compare foldings below are commented out upstream */
10477 	/* FIXME: This only works for 32 bit immediates
10478 	case OP_COMPARE_IMM:
10479 	case OP_LCOMPARE_IMM:
10480 	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10481 	return OP_AMD64_COMPARE_MEMBASE_IMM;
10483 	case OP_ICOMPARE_IMM:
10484 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10485 	return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10489 	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10490 	return OP_AMD64_COMPARE_MEMBASE_REG;
10493 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10494 	return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the SECOND source operand of OPCODE into a
 * reg-op-memory form (e.g. 'add reg, [mem]').  On x86 only pointer /
 * 4 byte loads qualify; on amd64 the 4 byte loads map to the 32 bit
 * (OP_X86_*/OP_AMD64_I*) variants and 8 byte / pointer loads map to
 * the 64 bit OP_AMD64_* variants.
 * NOTE(review): subsampled extraction — case labels and "no match"
 * returns are not visible here.
 */
10503 op_to_op_src2_membase (int load_opcode, int opcode)
10506 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10512 	return OP_X86_COMPARE_REG_MEMBASE;
10514 	return OP_X86_ADD_REG_MEMBASE;
10516 	return OP_X86_SUB_REG_MEMBASE;
10518 	return OP_X86_AND_REG_MEMBASE;
10520 	return OP_X86_OR_REG_MEMBASE;
10522 	return OP_X86_XOR_REG_MEMBASE;
10526 #ifdef TARGET_AMD64
	/* 32 bit operand width */
10527 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10530 	return OP_AMD64_ICOMPARE_REG_MEMBASE;
10532 	return OP_X86_ADD_REG_MEMBASE;
10534 	return OP_X86_SUB_REG_MEMBASE;
10536 	return OP_X86_AND_REG_MEMBASE;
10538 	return OP_X86_OR_REG_MEMBASE;
10540 	return OP_X86_XOR_REG_MEMBASE;
	/* 64 bit operand width */
10542 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10546 	return OP_AMD64_COMPARE_REG_MEMBASE;
10548 	return OP_AMD64_ADD_REG_MEMBASE;
10550 	return OP_AMD64_SUB_REG_MEMBASE;
10552 	return OP_AMD64_AND_REG_MEMBASE;
10554 	return OP_AMD64_OR_REG_MEMBASE;
10556 	return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to produce an immediate form
 * for opcodes that this target emulates in software: long shifts on
 * 32 bit targets without native long-shift support, and mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / _DIV is defined.  For all other opcodes it
 * delegates to mono_op_to_op_imm ().
 * NOTE(review): subsampled extraction — the case labels inside the two
 * #if blocks (presumably returning -1 for the emulated opcodes) are not
 * visible here.
 */
10565 mono_op_to_op_imm_noemul (int opcode)
10568 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10574 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10582 	return mono_op_to_op_imm (opcode);
10586 #ifndef DISABLE_JIT
10589  * mono_handle_global_vregs:
10591  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10595 mono_handle_global_vregs (MonoCompile *cfg)
10597 	gint32 *vreg_to_bb;
10598 	MonoBasicBlock *bb;
	/*
	 * vreg_to_bb [vreg] holds 0 (unseen), block_num + 1 (seen in exactly
	 * one bblock) or -1 (seen in several bblocks).
	 * NOTE(review): the element size here is sizeof (gint32*) although the
	 * array is gint32 — over-allocates on 64 bit; also '+ 1' binds to the
	 * byte count, not the element count.  Harmless but worth confirming.
	 */
10601 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10603 #ifdef MONO_ARCH_SIMD_INTRINSICS
10604 	if (cfg->uses_simd_intrinsics)
10605 		mono_simd_simplify_indirection (cfg);
10608 	/* Find local vregs used in more than one bb */
10609 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10610 		MonoInst *ins = bb->code;
10611 		int block_num = bb->block_num;
10613 		if (cfg->verbose_level > 2)
10614 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10617 		for (; ins; ins = ins->next) {
10618 			const char *spec = INS_INFO (ins->opcode);
10619 			int regtype = 0, regindex;
10622 			if (G_UNLIKELY (cfg->verbose_level > 2))
10623 				mono_print_ins (ins);
			/* By this point all CIL opcodes must have been lowered to machine IR */
10625 			g_assert (ins->opcode >= MONO_CEE_LAST);
			/* Visit dreg, sreg1, sreg2, sreg3 of this instruction in turn */
10627 			for (regindex = 0; regindex < 4; regindex ++) {
10630 				if (regindex == 0) {
10631 					regtype = spec [MONO_INST_DEST];
10632 					if (regtype == ' ')
10635 				} else if (regindex == 1) {
10636 					regtype = spec [MONO_INST_SRC1];
10637 					if (regtype == ' ')
10640 				} else if (regindex == 2) {
10641 					regtype = spec [MONO_INST_SRC2];
10642 					if (regtype == ' ')
10645 				} else if (regindex == 3) {
10646 					regtype = spec [MONO_INST_SRC3];
10647 					if (regtype == ' ')
10652 #if SIZEOF_REGISTER == 4
10653 				/* In the LLVM case, the long opcodes are not decomposed */
10654 				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10656 					 * Since some instructions reference the original long vreg,
10657 					 * and some reference the two component vregs, it is quite hard
10658 					 * to determine when it needs to be global. So be conservative.
10660 					if (!get_vreg_to_inst (cfg, vreg)) {
10661 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10663 						if (cfg->verbose_level > 2)
10664 							printf ("LONG VREG R%d made global.\n", vreg);
10668 					 * Make the component vregs volatile since the optimizations can
10669 					 * get confused otherwise.
10671 					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10672 					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10676 				g_assert (vreg != -1);
10678 				prev_bb = vreg_to_bb [vreg];
10679 				if (prev_bb == 0) {
10680 					/* 0 is a valid block num */
10681 					vreg_to_bb [vreg] = block_num + 1;
10682 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hardware registers are never turned into variables */
10683 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
					/* Used in a second bblock: promote to a global variable */
10686 					if (!get_vreg_to_inst (cfg, vreg)) {
10687 						if (G_UNLIKELY (cfg->verbose_level > 2))
10688 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10692 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10695 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10698 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10701 						mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10704 						g_assert_not_reached ();
10708 					/* Flag as having been used in more than one bb */
10709 					vreg_to_bb [vreg] = -1;
10715 	/* If a variable is used in only one bblock, convert it into a local vreg */
10716 	for (i = 0; i < cfg->num_varinfo; i++) {
10717 		MonoInst *var = cfg->varinfo [i];
10718 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10720 		switch (var->type) {
10726 #if SIZEOF_REGISTER == 8
10729 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10730 		/* Enabling this screws up the fp stack on x86 */
10733 		/* Arguments are implicitly global */
10734 		/* Putting R4 vars into registers doesn't work currently */
10735 		if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10737 			 * Make that the variable's liveness interval doesn't contain a call, since
10738 			 * that would cause the lvreg to be spilled, making the whole optimization
10741 			/* This is too slow for JIT compilation */
10743 			if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10745 				int def_index, call_index, ins_index;
10746 				gboolean spilled = FALSE;
10751 				for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10752 					const char *spec = INS_INFO (ins->opcode);
10754 					if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10755 						def_index = ins_index;
					/*
					 * NOTE(review): both clauses below test SRC1/sreg1 — the
					 * second was presumably meant to test SRC2/sreg2, so uses
					 * through sreg2 are missed by this liveness check.  Confirm
					 * and fix upstream; left untouched here.
					 */
10757 					if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10758 						((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10759 						if (call_index > def_index) {
10765 					if (MONO_IS_CALL (ins))
10766 						call_index = ins_index;
10776 			if (G_UNLIKELY (cfg->verbose_level > 2))
10777 				printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
			/* Demote: kill the variable and drop its vreg->inst mapping */
10778 			var->flags |= MONO_INST_IS_DEAD;
10779 			cfg->vreg_to_inst [var->dreg] = NULL;
10786 	 * Compress the varinfo and vars tables so the liveness computation is faster and
10787 	 * takes up less space.
10790 	for (i = 0; i < cfg->num_varinfo; ++i) {
10791 		MonoInst *var = cfg->varinfo [i];
10792 		if (pos < i && cfg->locals_start == i)
10793 			cfg->locals_start = pos;
10794 		if (!(var->flags & MONO_INST_IS_DEAD)) {
10796 			cfg->varinfo [pos] = cfg->varinfo [i];
10797 			cfg->varinfo [pos]->inst_c0 = pos;
10798 			memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10799 			cfg->vars [pos].idx = pos;
10800 #if SIZEOF_REGISTER == 4
10801 			if (cfg->varinfo [pos]->type == STACK_I8) {
10802 				/* Modify the two component vars too */
10805 				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10806 				var1->inst_c0 = pos;
10807 				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10808 				var1->inst_c0 = pos;
10815 	cfg->num_varinfo = pos;
10816 	if (cfg->locals_start > cfg->num_varinfo)
10817 		cfg->locals_start = cfg->num_varinfo;
10821 * mono_spill_global_vars:
10823 * Generate spill code for variables which are not allocated to registers,
10824 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10825 * code is generated which could be optimized by the local optimization passes.
10828 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10830 MonoBasicBlock *bb;
10832 int orig_next_vreg;
10833 guint32 *vreg_to_lvreg;
10835 guint32 i, lvregs_len;
10836 gboolean dest_has_lvreg = FALSE;
10837 guint32 stacktypes [128];
10838 MonoInst **live_range_start, **live_range_end;
10839 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10841 *need_local_opts = FALSE;
10843 memset (spec2, 0, sizeof (spec2));
10845 /* FIXME: Move this function to mini.c */
10846 stacktypes ['i'] = STACK_PTR;
10847 stacktypes ['l'] = STACK_I8;
10848 stacktypes ['f'] = STACK_R8;
10849 #ifdef MONO_ARCH_SIMD_INTRINSICS
10850 stacktypes ['x'] = STACK_VTYPE;
10853 #if SIZEOF_REGISTER == 4
10854 /* Create MonoInsts for longs */
10855 for (i = 0; i < cfg->num_varinfo; i++) {
10856 MonoInst *ins = cfg->varinfo [i];
10858 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10859 switch (ins->type) {
10864 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10867 g_assert (ins->opcode == OP_REGOFFSET);
10869 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10871 tree->opcode = OP_REGOFFSET;
10872 tree->inst_basereg = ins->inst_basereg;
10873 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10875 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10877 tree->opcode = OP_REGOFFSET;
10878 tree->inst_basereg = ins->inst_basereg;
10879 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10889 /* FIXME: widening and truncation */
10892 * As an optimization, when a variable allocated to the stack is first loaded into
10893 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10894 * the variable again.
10896 orig_next_vreg = cfg->next_vreg;
10897 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10898 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10902 * These arrays contain the first and last instructions accessing a given
10904 * Since we emit bblocks in the same order we process them here, and we
10905 * don't split live ranges, these will precisely describe the live range of
10906 * the variable, i.e. the instruction range where a valid value can be found
10907 * in the variables location.
10908 * The live range is computed using the liveness info computed by the liveness pass.
10909 * We can't use vmv->range, since that is an abstract live range, and we need
10910 * one which is instruction precise.
10911 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10913 /* FIXME: Only do this if debugging info is requested */
10914 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10915 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10916 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10917 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10919 /* Add spill loads/stores */
10920 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10923 if (cfg->verbose_level > 2)
10924 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10926 /* Clear vreg_to_lvreg array */
10927 for (i = 0; i < lvregs_len; i++)
10928 vreg_to_lvreg [lvregs [i]] = 0;
10932 MONO_BB_FOR_EACH_INS (bb, ins) {
10933 const char *spec = INS_INFO (ins->opcode);
10934 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10935 gboolean store, no_lvreg;
10936 int sregs [MONO_MAX_SRC_REGS];
10938 if (G_UNLIKELY (cfg->verbose_level > 2))
10939 mono_print_ins (ins);
10941 if (ins->opcode == OP_NOP)
10945 * We handle LDADDR here as well, since it can only be decomposed
10946 * when variable addresses are known.
10948 if (ins->opcode == OP_LDADDR) {
10949 MonoInst *var = ins->inst_p0;
10951 if (var->opcode == OP_VTARG_ADDR) {
10952 /* Happens on SPARC/S390 where vtypes are passed by reference */
10953 MonoInst *vtaddr = var->inst_left;
10954 if (vtaddr->opcode == OP_REGVAR) {
10955 ins->opcode = OP_MOVE;
10956 ins->sreg1 = vtaddr->dreg;
10958 else if (var->inst_left->opcode == OP_REGOFFSET) {
10959 ins->opcode = OP_LOAD_MEMBASE;
10960 ins->inst_basereg = vtaddr->inst_basereg;
10961 ins->inst_offset = vtaddr->inst_offset;
10965 g_assert (var->opcode == OP_REGOFFSET);
10967 ins->opcode = OP_ADD_IMM;
10968 ins->sreg1 = var->inst_basereg;
10969 ins->inst_imm = var->inst_offset;
10972 *need_local_opts = TRUE;
10973 spec = INS_INFO (ins->opcode);
10976 if (ins->opcode < MONO_CEE_LAST) {
10977 mono_print_ins (ins);
10978 g_assert_not_reached ();
10982 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10986 if (MONO_IS_STORE_MEMBASE (ins)) {
10987 tmp_reg = ins->dreg;
10988 ins->dreg = ins->sreg2;
10989 ins->sreg2 = tmp_reg;
10992 spec2 [MONO_INST_DEST] = ' ';
10993 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10994 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10995 spec2 [MONO_INST_SRC3] = ' ';
10997 } else if (MONO_IS_STORE_MEMINDEX (ins))
10998 g_assert_not_reached ();
11003 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11004 printf ("\t %.3s %d", spec, ins->dreg);
11005 num_sregs = mono_inst_get_src_registers (ins, sregs);
11006 for (srcindex = 0; srcindex < 3; ++srcindex)
11007 printf (" %d", sregs [srcindex]);
11014 regtype = spec [MONO_INST_DEST];
11015 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11018 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11019 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11020 MonoInst *store_ins;
11022 MonoInst *def_ins = ins;
11023 int dreg = ins->dreg; /* The original vreg */
11025 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11027 if (var->opcode == OP_REGVAR) {
11028 ins->dreg = var->dreg;
11029 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11031 * Instead of emitting a load+store, use a _membase opcode.
11033 g_assert (var->opcode == OP_REGOFFSET);
11034 if (ins->opcode == OP_MOVE) {
11038 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11039 ins->inst_basereg = var->inst_basereg;
11040 ins->inst_offset = var->inst_offset;
11043 spec = INS_INFO (ins->opcode);
11047 g_assert (var->opcode == OP_REGOFFSET);
11049 prev_dreg = ins->dreg;
11051 /* Invalidate any previous lvreg for this vreg */
11052 vreg_to_lvreg [ins->dreg] = 0;
11056 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11058 store_opcode = OP_STOREI8_MEMBASE_REG;
11061 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11063 if (regtype == 'l') {
11064 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11065 mono_bblock_insert_after_ins (bb, ins, store_ins);
11066 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11067 mono_bblock_insert_after_ins (bb, ins, store_ins);
11068 def_ins = store_ins;
11071 g_assert (store_opcode != OP_STOREV_MEMBASE);
11073 /* Try to fuse the store into the instruction itself */
11074 /* FIXME: Add more instructions */
11075 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11076 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11077 ins->inst_imm = ins->inst_c0;
11078 ins->inst_destbasereg = var->inst_basereg;
11079 ins->inst_offset = var->inst_offset;
11080 spec = INS_INFO (ins->opcode);
11081 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11082 ins->opcode = store_opcode;
11083 ins->inst_destbasereg = var->inst_basereg;
11084 ins->inst_offset = var->inst_offset;
11088 tmp_reg = ins->dreg;
11089 ins->dreg = ins->sreg2;
11090 ins->sreg2 = tmp_reg;
11093 spec2 [MONO_INST_DEST] = ' ';
11094 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11095 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11096 spec2 [MONO_INST_SRC3] = ' ';
11098 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11099 // FIXME: The backends expect the base reg to be in inst_basereg
11100 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11102 ins->inst_basereg = var->inst_basereg;
11103 ins->inst_offset = var->inst_offset;
11104 spec = INS_INFO (ins->opcode);
11106 /* printf ("INS: "); mono_print_ins (ins); */
11107 /* Create a store instruction */
11108 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11110 /* Insert it after the instruction */
11111 mono_bblock_insert_after_ins (bb, ins, store_ins);
11113 def_ins = store_ins;
11116 * We can't assign ins->dreg to var->dreg here, since the
11117 * sregs could use it. So set a flag, and do it after
11120 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11121 dest_has_lvreg = TRUE;
11126 if (def_ins && !live_range_start [dreg]) {
11127 live_range_start [dreg] = def_ins;
11128 live_range_start_bb [dreg] = bb;
11135 num_sregs = mono_inst_get_src_registers (ins, sregs);
11136 for (srcindex = 0; srcindex < 3; ++srcindex) {
11137 regtype = spec [MONO_INST_SRC1 + srcindex];
11138 sreg = sregs [srcindex];
11140 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11141 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11142 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11143 MonoInst *use_ins = ins;
11144 MonoInst *load_ins;
11145 guint32 load_opcode;
11147 if (var->opcode == OP_REGVAR) {
11148 sregs [srcindex] = var->dreg;
11149 //mono_inst_set_src_registers (ins, sregs);
11150 live_range_end [sreg] = use_ins;
11151 live_range_end_bb [sreg] = bb;
11155 g_assert (var->opcode == OP_REGOFFSET);
11157 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11159 g_assert (load_opcode != OP_LOADV_MEMBASE);
11161 if (vreg_to_lvreg [sreg]) {
11162 g_assert (vreg_to_lvreg [sreg] != -1);
11164 /* The variable is already loaded to an lvreg */
11165 if (G_UNLIKELY (cfg->verbose_level > 2))
11166 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11167 sregs [srcindex] = vreg_to_lvreg [sreg];
11168 //mono_inst_set_src_registers (ins, sregs);
11172 /* Try to fuse the load into the instruction */
11173 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11174 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11175 sregs [0] = var->inst_basereg;
11176 //mono_inst_set_src_registers (ins, sregs);
11177 ins->inst_offset = var->inst_offset;
11178 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11179 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11180 sregs [1] = var->inst_basereg;
11181 //mono_inst_set_src_registers (ins, sregs);
11182 ins->inst_offset = var->inst_offset;
11184 if (MONO_IS_REAL_MOVE (ins)) {
11185 ins->opcode = OP_NOP;
11188 //printf ("%d ", srcindex); mono_print_ins (ins);
11190 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11192 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11193 if (var->dreg == prev_dreg) {
11195 * sreg refers to the value loaded by the load
11196 * emitted below, but we need to use ins->dreg
11197 * since it refers to the store emitted earlier.
11201 g_assert (sreg != -1);
11202 vreg_to_lvreg [var->dreg] = sreg;
11203 g_assert (lvregs_len < 1024);
11204 lvregs [lvregs_len ++] = var->dreg;
11208 sregs [srcindex] = sreg;
11209 //mono_inst_set_src_registers (ins, sregs);
11211 if (regtype == 'l') {
11212 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11213 mono_bblock_insert_before_ins (bb, ins, load_ins);
11214 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11215 mono_bblock_insert_before_ins (bb, ins, load_ins);
11216 use_ins = load_ins;
11219 #if SIZEOF_REGISTER == 4
11220 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11222 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11223 mono_bblock_insert_before_ins (bb, ins, load_ins);
11224 use_ins = load_ins;
11228 if (var->dreg < orig_next_vreg) {
11229 live_range_end [var->dreg] = use_ins;
11230 live_range_end_bb [var->dreg] = bb;
11234 mono_inst_set_src_registers (ins, sregs);
11236 if (dest_has_lvreg) {
11237 g_assert (ins->dreg != -1);
11238 vreg_to_lvreg [prev_dreg] = ins->dreg;
11239 g_assert (lvregs_len < 1024);
11240 lvregs [lvregs_len ++] = prev_dreg;
11241 dest_has_lvreg = FALSE;
11245 tmp_reg = ins->dreg;
11246 ins->dreg = ins->sreg2;
11247 ins->sreg2 = tmp_reg;
11250 if (MONO_IS_CALL (ins)) {
11251 /* Clear vreg_to_lvreg array */
11252 for (i = 0; i < lvregs_len; i++)
11253 vreg_to_lvreg [lvregs [i]] = 0;
11255 } else if (ins->opcode == OP_NOP) {
11257 MONO_INST_NULLIFY_SREGS (ins);
11260 if (cfg->verbose_level > 2)
11261 mono_print_ins_index (1, ins);
11264 /* Extend the live range based on the liveness info */
11265 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11266 for (i = 0; i < cfg->num_varinfo; i ++) {
11267 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11269 if (vreg_is_volatile (cfg, vi->vreg))
11270 /* The liveness info is incomplete */
11273 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11274 /* Live from at least the first ins of this bb */
11275 live_range_start [vi->vreg] = bb->code;
11276 live_range_start_bb [vi->vreg] = bb;
11279 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11280 /* Live at least until the last ins of this bb */
11281 live_range_end [vi->vreg] = bb->last_ins;
11282 live_range_end_bb [vi->vreg] = bb;
11288 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11290 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11291 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11293 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11294 for (i = 0; i < cfg->num_varinfo; ++i) {
11295 int vreg = MONO_VARINFO (cfg, i)->vreg;
11298 if (live_range_start [vreg]) {
11299 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11301 ins->inst_c1 = vreg;
11302 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11304 if (live_range_end [vreg]) {
11305 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11307 ins->inst_c1 = vreg;
11308 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11309 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11311 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11317 g_free (live_range_start);
11318 g_free (live_range_end);
11319 g_free (live_range_start_bb);
11320 g_free (live_range_end_bb);
11325 * - use 'iadd' instead of 'int_add'
11326 * - handling ovf opcodes: decompose in method_to_ir.
11327 * - unify iregs/fregs
11328 * -> partly done, the missing parts are:
11329 * - a more complete unification would involve unifying the hregs as well, so
11330 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11331 * would no longer map to the machine hregs, so the code generators would need to
11332 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11333 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11334 * fp/non-fp branches speeds it up by about 15%.
11335 * - use sext/zext opcodes instead of shifts
11337 * - get rid of TEMPLOADs if possible and use vregs instead
11338 * - clean up usage of OP_P/OP_ opcodes
11339 * - cleanup usage of DUMMY_USE
11340 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11342 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11343 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11344 * - make sure handle_stack_args () is called before the branch is emitted
11345 * - when the new IR is done, get rid of all unused stuff
11346 * - COMPARE/BEQ as separate instructions or unify them ?
11347 * - keeping them separate allows specialized compare instructions like
11348 * compare_imm, compare_membase
11349 * - most back ends unify fp compare+branch, fp compare+ceq
11350 * - integrate mono_save_args into inline_method
11351 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11352 * - handle long shift opts on 32 bit platforms somehow: they require
11353 * 3 sregs (2 for arg1 and 1 for arg2)
11354 * - make byref a 'normal' type.
11355 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11356 * variable if needed.
11357 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11358 * like inline_method.
11359 * - remove inlining restrictions
11360 * - fix LNEG and enable cfold of INEG
11361 * - generalize x86 optimizations like ldelema as a peephole optimization
11362 * - add store_mem_imm for amd64
11363 * - optimize the loading of the interruption flag in the managed->native wrappers
11364 * - avoid special handling of OP_NOP in passes
11365 * - move code inserting instructions into one function/macro.
11366 * - try a coalescing phase after liveness analysis
11367 * - add float -> vreg conversion + local optimizations on !x86
11368 * - figure out how to handle decomposed branches during optimizations, ie.
11369 * compare+branch, op_jump_table+op_br etc.
11370 * - promote RuntimeXHandles to vregs
11371 * - vtype cleanups:
11372 * - add a NEW_VARLOADA_VREG macro
11373 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11374 * accessing vtype fields.
11375 * - get rid of I8CONST on 64 bit platforms
11376 * - dealing with the increase in code size due to branches created during opcode
11378 * - use extended basic blocks
11379 * - all parts of the JIT
11380 * - handle_global_vregs () && local regalloc
11381 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11382 * - sources of increase in code size:
11385 * - isinst and castclass
11386 * - lvregs not allocated to global registers even if used multiple times
11387 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11389 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11390 * - add all micro optimizations from the old JIT
11391 * - put tree optimizations into the deadce pass
11392 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11393 * specific function.
11394 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11395 * fcompare + branchCC.
11396 * - create a helper function for allocating a stack slot, taking into account
11397 * MONO_CFG_HAS_SPILLUP.
11399 * - merge the ia64 switch changes.
11400 * - optimize mono_regstate2_alloc_int/float.
11401 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11402 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11403 * parts of the tree could be separated by other instructions, killing the tree
11404 * arguments, or stores killing loads etc. Also, should we fold loads into other
11405 * instructions if the result of the load is used multiple times ?
11406 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11407 * - LAST MERGE: 108395.
11408 * - when returning vtypes in registers, generate IR and append it to the end of the
11409 * last bb instead of doing it in the epilog.
11410 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11418 - When to decompose opcodes:
11419 - earlier: this makes some optimizations hard to implement, since the low level IR
11420 no longer contains the necessary information. But it is easier to do.
11421 - later: harder to implement, enables more optimizations.
11422 - Branches inside bblocks:
11423 - created when decomposing complex opcodes.
11424 - branches to another bblock: harmless, but not tracked by the branch
11425 optimizations, so need to branch to a label at the start of the bblock.
11426 - branches to inside the same bblock: very problematic, trips up the local
11427 reg allocator. Can be fixed by splitting the current bblock, but that is a
11428 complex operation, since some local vregs can become global vregs etc.
11429 - Local/global vregs:
11430 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11431 local register allocator.
11432 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11433 structure, created by mono_create_var (). Assigned to hregs or the stack by
11434 the global register allocator.
11435 - When to do optimizations like alu->alu_imm:
11436 - earlier -> saves work later on since the IR will be smaller/simpler
11437 - later -> can work on more instructions
11438 - Handling of valuetypes:
11439 - When a vtype is pushed on the stack, a new temporary is created, an
11440 instruction computing its address (LDADDR) is emitted and pushed on
11441 the stack. Need to optimize cases when the vtype is used immediately as in
11442 argument passing, stloc etc.
11443 - Instead of the to_end stuff in the old JIT, simply call the function handling
11444 the values on the stack before emitting the last instruction of the bb.
11447 #endif /* DISABLE_JIT */