2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * Cost/limit constants and failure-exit helper macros used by the CIL
 * decoder: the *_FAILURE macros record an exception type/message on the
 * MonoCompile and jump to the method-local `exception_exit` label.
 * NOTE(review): this chunk is a line-subsampled dump — macro bodies below
 * are missing lines (closing braces, `} while (0)` tails), so continuations
 * that appear to run into the next #define are extraction artifacts.
 * No further comments are inserted inside this region to avoid disturbing
 * the backslash continuations.
 */
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for file-local opcode translation helpers and for
 * emission helpers defined elsewhere in the JIT. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Shared trampoline signatures, defined in another translation unit. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/*
 * Instruction metadata tables, generated by expanding the MINI_OP/MINI_OP3
 * X-macros over "mini-ops.h" (once for dest/src register kinds, once for
 * source-register counts).
 * NOTE(review): subsampled dump — the array declarations that precede the
 * first "mini-ops.h" include and several #undef lines are not visible here.
 */
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: map each opcode to its number of source registers. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize liveness bookkeeping of a variable-info record. */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 * Copy the first three entries of REGS into the sreg1/sreg2/sreg3 fields
 * of INS. Assumes REGS has at least 3 entries.
 * NOTE(review): subsampled dump — return-type line and braces not visible.
 */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Thin public wrappers around the file-local virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven destination regs. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
/* Allocate a dreg whose register class is chosen from the eval-stack type. */
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the move opcode appropriate for its register class.
 * Enums and generic instances are unwrapped to their underlying type;
 * unknown types abort via g_error.
 * NOTE(review): subsampled dump — the return statements for most cases and
 * several case labels are missing from this view.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
/* Enum value types are handled as their underlying primitive type. */
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing (as references). */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of one basic block: its in-edges, out-edges (block number and
 * depth-first number), then every instruction in the block.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
/*
 * IR-emission helper macros used by the main CIL decoding loop. These rely
 * on locals of that loop (cfg, ins, sp, bblock, ip, target, stack_start...).
 * NOTE(review): subsampled dump — macro bodies are missing lines; comments
 * are only inserted where the preceding line has no backslash continuation.
 */
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the bblock covering IL offset ip. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, emit a typed binary op and push the result. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/*
 * Adds the edge to both adjacency lists: 'to' is appended to from->out_bb
 * and 'from' to to->in_bb, after checking the edge is not already present.
 * New arrays are mempool-allocated and the old entries copied over.
 * NOTE(review): subsampled dump — the final assignments of the new arrays
 * and count increments are not visible here.
 */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Debug trace of the edge being added (IL offsets or entry/exit). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip if 'to' is already a successor of 'from'. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Same procedure for the in-edge list of 'to'. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public entry point: forwards to the file-local link_bblock. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags. */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (as a GList) the exception clauses of the given TYPE whose
 * protected range contains IP but not TARGET — i.e. the handlers that a
 * branch from IP to TARGET would leave and that must therefore be run.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Return the stack-pointer save variable for REGION, creating and caching
 * it in cfg->spvars on first use. The variable is marked MONO_INST_INDIRECT
 * so the register allocator keeps it on the stack.
 */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for an IL offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Like mono_find_exvar_for_offset, but creates and caches an object-typed,
 * stack-pinned variable in cfg->exvars when none exists for OFFSET yet.
 */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 * from TYPE. Enums and generic instances are unwrapped and re-dispatched.
 * NOTE(review): subsampled dump — several case labels and break statements
 * are missing from this view. */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of numeric binops, indexed [src1->type][src2->type];
 * STACK_INV marks combinations that are invalid IL. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor...). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, non-zero values encode allowed
 * comparison categories (see uses in type_from_op). */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops: value type indexed by shift-amount type. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CIL opcode, indexed
 * by the (already-resolved) stack type of the operation. */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes ins->opcode (generic CIL opcode + per-type delta from
 * the *_op_map tables above, or a direct opcode assignment).
 * NOTE(review): subsampled dump — many case labels and break statements
 * are missing from this view; the visible bodies belong to the elided
 * case groups above them. */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
/* src2 is an immediate here, so src1's type is used for both indices. */
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
/* ldind result stack-types, indexed by the ldind variant. */
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validate that the stack types of ARGS are compatible with SIG (and the
 * optional THIS type) for a call.
 * NOTE(review): subsampled dump — return statements, several case labels
 * and the param_table contents are missing from this view.
 */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable on the MonoCompile. */
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Returns the cached GOT variable, creating it lazily; only meaningful on
 * architectures defining MONO_ARCH_NEED_GOT_VAR and when compiling AOT.
 * NOTE(review): subsampled dump — the non-AOT/early-return lines are not
 * visible here. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Return the runtime-generic-context variable (cfg->rgctx_var), creating
 * it on first use. Only valid under generic sharing; the variable is
 * forced onto the stack via MONO_INST_INDIRECT.
 */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's eval-stack type back to a MonoType*, using the
 * instruction's klass for managed pointers, objects and value types.
 * Unhandled stack types abort via g_error.
 */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type: classify a MonoType into an eval-stack
 * type after resolving its underlying type (enums etc.).
 * NOTE(review): subsampled dump — the return statements for most case
 * groups are missing from this view.
 */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the element
 * it accesses. Unknown opcodes abort.
 * NOTE(review): subsampled dump — most case labels are not visible here;
 * each return corresponds to an elided case above it.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a local variable to hold the eval-stack entry at SLOT with the
 * type of INS, reusing a previously-created variable for the same
 * (slot, stack type) pair when one exists in cfg->intvars. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) + slot * STACK_MAX. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, remember the (image, token) pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later relocate the
 * reference.
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): subsampled dump — loop/brace closings and some statements
 * are missing from this view. */
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables spill the stack into,
 * preferring the in_stack already chosen by a successor. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables into the successors' in_stack, checking
 * that all join points agree on the stack depth. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
/* Spill the current stack values into the shared locals. */
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Emit IR that loads the interface-offsets table entry for "klass" into
 *   "intf_reg", given a vtable pointer in "vtable_reg".  The offsets array
 *   precedes the vtable in memory, hence the negative membase offset below.
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time; the (already adjusted,
 * i.e. negative/scaled) id is patched in via MONO_PATCH_INFO_ADJUSTED_IID
 * and added to the vtable pointer at runtime. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so index backwards
 * from the vtable pointer directly. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into "intf_bit_reg" a nonzero value iff the bit for
 *   klass->interface_id is set in the interface bitmap found at
 *   base_reg + offset.  Three variants:
 *   - COMPRESSED_INTERFACE_BITMAP: defer the test to the
 *     mono_class_interface_match icall (bitmap format is compressed).
 *   - AOT (uncompressed): the interface id is a runtime patch, so compute
 *     the byte index (id >> 3) and bit mask (1 << (id & 7)) with emitted IR.
 *   - JIT (uncompressed): byte index and mask are compile-time constants.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
/* Result of the icall becomes the "bit set" flag. */
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: compute byte address and bit mask at runtime from the patched iid. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the mask are constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoClass::interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoVTable::interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is smaller than
1439 * the value given by max_iid_reg.
/*
 * On failure (max_iid < iid, unsigned compare) the emitted code either
 * branches to "false_target" or raises InvalidCastException.
 * NOTE(review): the guard selecting between the branch and the exception is
 * elided in this excerpt — presumably "if (false_target)"; confirm upstream.
 */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: interface id is patched in at runtime via MONO_PATCH_INFO_IID. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass::max_interface_id. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "is instance of (non-interface) class" test on the MonoClass in
 *   "klass_reg", using the supertypes table: supertypes [klass->idepth - 1]
 *   equals "klass" iff klass_reg's class derives from it.  Branches to
 *   "true_target" on a match; the idepth guard branches to "false_target"
 *   when the candidate's inheritance chain is too short.
 *   "klass_ins", when non-NULL, supplies the class pointer as a runtime
 *   value (used by shared/generic code) instead of a constant.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than MONO_DEFAULT_SUPERTABLE_SIZE need the explicit
 * idepth bounds check; shallower entries always exist in the table. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against the runtime class pointer, an AOT class constant, or the
 * raw MonoClass* immediate, depending on compilation mode. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with a compile-time class only. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast test against the vtable in "vtable_reg":
 *   first range-check the interface id against max_interface_id, then test
 *   the interface bitmap bit.  On success branches to "true_target"; on
 *   failure either branches to "false_target" or throws
 *   InvalidCastException (the selecting guard is elided in this excerpt).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
/* Nonzero bit => the interface is implemented. */
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same structure as mini_emit_iface_cast, but reads max_interface_id and
 * the interface bitmap from a MonoClass instead of a MonoVTable. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an *exact* class equality check on "klass_reg"; throws
 *   InvalidCastException on mismatch.  The class to compare against comes
 *   from "klass_inst" (runtime value), an AOT class constant, or the raw
 *   MonoClass* immediate, depending on compilation mode.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time class only. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare "klass_reg" against "klass" and branch to "target" using the
 *   caller-supplied relation "branch_op" (e.g. OP_PBEQ / OP_PBNE_UN).
 *   No exception is thrown here, unlike mini_emit_class_check_inst.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst and mini_emit_castclass are
 * mutually recursive (arrays of arrays recurse on the element class). */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a full castclass check (throws InvalidCastException on failure) for
 *   the class in "klass_reg".  Handles arrays (rank + element-class check,
 *   with special cases for object/enum element types), then falls back to
 *   the supertypes-table subclass check for ordinary classes.
 *   "obj_reg" is the object itself (or -1 to skip the SZARRAY bounds
 *   check); "klass_inst" optionally supplies the target class as a runtime
 *   value for generic sharing.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* Array path does not support a runtime class instance. */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element type object: any reference element works, but enums (whose
 * cast_class can alias) need the extra Enum check. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY is specifically a vector (zero lower bound, single dimension):
 * a rank-1 multi-dim array has a non-NULL bounds pointer and must fail. */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on failure. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time class only. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that fills "size" bytes at destreg + offset with "val".
 *   Small, well-aligned sizes use single immediate stores; larger blocks
 *   store a value register in descending power-of-two chunks.  Only val == 0
 *   is supported (asserted below).
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: one immediate store when the size is small and alignment
 * permits an access of that width. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store it out. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1676 /* This could be optimized further if necessary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks first on 64-bit targets (after an optional 4-byte
 * head to reach 8-byte alignment). */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying "size" bytes from srcreg + soffset to
 *   destreg + doffset, using load/store pairs in descending power-of-two
 *   chunk sizes.  "align" limits the access width; unaligned copies fall
 *   back to byte-sized transfers.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Unaligned source/destination: byte-by-byte copy. */
1729 /* This could be optimized further if necessary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 64-bit targets: copy in 8-byte chunks while possible. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte load/store pairs. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a managed return type to the IR call opcode family:
 *   VOIDCALL / CALL (int-sized) / LCALL (64-bit) / FCALL (float) /
 *   VCALL (valuetype), each with _REG (indirect) and VIRT variants selected
 *   by the "calli" and "virt" flags.  Enums and generic instances are
 *   resolved to their underlying type and retried.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized regardless of the pointee. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve type variables under generic sharing before switching. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums use their underlying integral type's opcode. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Managed pointers (byref) only accept STACK_MP of the matching class
 * or STACK_PTR values. */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the basic type. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Valuetypes must match both the stack kind and the exact class. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* 'this' must be a reference, managed pointer, or native pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
/* Resolve type variables under generic sharing before switching. */
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums re-enter the switch with their underlying integral type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open type. */
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to its direct-call equivalent (used when virtual
 * dispatch can be statically resolved). */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Map a *CALLVIRT opcode to its *CALL_MEMBASE equivalent, i.e. an indirect
 * call through a [basereg + offset] slot (vtable/IMT dispatch). */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Arrange for the IMT (interface method table) argument to be passed to
 *   "call".  The argument is either the supplied runtime value "imt_arg" or
 *   a constant describing call->method (AOT patch or raw pointer).  Under
 *   LLVM the register is recorded in call->imt_arg_reg; otherwise it is
 *   bound to MONO_ARCH_IMT_REG when the architecture defines one, falling
 *   back to mono_arch_emit_imt_argument ().
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant/value selection, but the register is a
 * real outarg bound to the architecture's IMT register. */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from "mp" describing a patch of
 * "type" at IL offset "ip" targeting "target". */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature "sig" and arguments
 *   "args".  Selects the call opcode from the return type ("calli" =>
 *   indirect, "virtual" => virt variant, "tail" => OP_TAILCALL), wires up
 *   valuetype-return handling, soft-float argument conversion, and the
 *   per-backend argument marshalling.  The instruction is NOT added to a
 *   bblock here; callers do that.
 */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse the caller-provided vret address... */
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
/* ...or allocate a temporary and pass its address via OP_OUTARG_VTRETADDR. */
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Backend-specific argument passing (LLVM or native codegen). */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-argument area needed by any call. */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in "addr" and append it to the
 * current bblock; returns the call as a MonoInst*. */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/* Attach the RGCTX (runtime generic context) argument to "call": bound to
 * the dedicated MONO_ARCH_RGCTX_REG when the architecture has one,
 * otherwise recorded in call->rgctx_arg_reg for later handling. */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/* Like mono_emit_calli, but additionally passes an RGCTX argument.  The
 * rgctx value is copied into a fresh register before emitting the call so
 * it survives argument setup. */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations for the RGCTX lookup helpers used below. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a managed call to "method".  Handles, in order: string ctor
 *   signature fixup, transparent-proxy (remoting) wrappers, the delegate
 *   Invoke fast path, devirtualization of non-virtual/final methods, and
 *   finally real virtual dispatch through the vtable or, for interfaces,
 *   the IMT.  "this" being non-NULL selects the virtual call opcodes;
 *   "imt_arg" optionally carries an explicit IMT/interface argument.
 *   Returns the call instruction (already appended to the current bblock).
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A MarshalByRef (or object) receiver may be a transparent proxy unless
 * the call is virtual or 'this' is already known to be a real object. */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
/* Gshared + remoting: look the checked-invoke wrapper up through the
 * RGCTX and call it indirectly. */
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call straight through delegate->invoke_impl. */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods that are not
 * remoting wrappers, can be called directly after a null check. */
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: indirect call through a vtable (or IMT) slot. */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots sit at negative offsets before the vtable. */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch: index via the interface-offsets table. */
2412 if (slot_reg == -1) {
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Ordinary class virtual: index straight into the vtable array. */
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also carry an IMT argument. */
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes "vtable_arg" as
 * the RGCTX argument (copied to a fresh register before the call). */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call "method" with its own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function "func" with signature "sig"
 * and append it to the current bblock. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emit a call to the JIT icall registered for address "func"; the icall's
 * wrapper and signature are looked up from the icall table. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the ABS patch resolver can find the
 * real target for this "address" later. */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call when the callee (pinvoke, or any call under
 * LLVM) may return a small integer without defined upper bits: pick the
 * conversion matching the load opcode of the return type and emit it.
 * NOTE(review): the `break;` statements between switch cases are elided in
 * this dump (e.g. original line 2529) — they exist in the full source.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening. */
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the cached managed String.memcpy (dest, src, size) helper from
 * corlib; aborts if corlib is too old to provide it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build, in *WB_BITMAP, a bitmap of the pointer-sized slots of KLASS
 * (starting at byte OFFSET) which hold object references, recursing into
 * value-type fields that themselves contain references. Used to drive
 * mono_gc_wbarrier_value_copy_bitmap. Static fields are skipped; for
 * valuetypes the MonoObject header is subtracted from field offsets.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer aligned. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or the vreg VALUE_REG when
 * VALUE is NULL) through PTR, by calling the GC's write-barrier method.
 * A dummy use is emitted afterwards to keep the stored value alive for
 * liveness purposes.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2596 MonoInst *dummy_use;
2597 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2598 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2601 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/* No MonoInst for the value: keep the raw vreg alive instead. */
2603 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2604 dummy_use->sreg1 = value_reg;
2605 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a SIZE-byte valuetype
 * of type KLASS from iargs[1] to iargs[0]. Returns when the copy could not be
 * inlined (alignment below pointer size, or size too large for the 32-slot
 * barrier bitmap); for medium sizes it falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall, otherwise it unrolls pointer-sized
 * loads/stores, emitting a write barrier for each slot flagged in the bitmap.
 * NOTE(review): return statements and some `#if`/brace lines are elided in
 * this dump — the control flow below is partial.
 */
2610 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2612 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2613 unsigned need_wb = 0;
2618 /*types with references can't have alignment smaller than sizeof(void*) */
2619 if (align < SIZEOF_VOID_P)
2622 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2623 if (size > 32 * SIZEOF_VOID_P)
2626 create_write_barrier_bitmap (klass, &need_wb, 0);
2628 /* We don't unroll more than 5 stores to avoid code bloat. */
2629 if (size > 5 * SIZEOF_VOID_P) {
2630 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2631 size += (SIZEOF_VOID_P - 1);
2632 size &= ~(SIZEOF_VOID_P - 1);
2634 EMIT_NEW_ICONST (cfg, iargs [2], size);
2635 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2636 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2640 destreg = iargs [0]->dreg;
2641 srcreg = iargs [1]->dreg;
/* Walk the destination with a separate pointer so iargs[0] can be reused
 * as the barrier argument at the current slot. */
2644 dest_ptr_reg = alloc_preg (cfg);
2645 tmp_reg = alloc_preg (cfg);
2648 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2650 while (size >= SIZEOF_VOID_P) {
2651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2652 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (check elided here). */
2655 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2657 offset += SIZEOF_VOID_P;
2658 size -= SIZEOF_VOID_P;
2661 /*tmp += sizeof (void*)*/
2662 if (size >= SIZEOF_VOID_P) {
2663 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2664 MONO_ADD_INS (cfg->cbb, iargs [0]);
2668 /* Those cannot be references since size < sizeof (void*) */
2670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2694 * Emit code to copy a valuetype of type @klass whose address is stored in
2695 * @src->dreg to memory whose address is stored at @dest->dreg.
2698 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2700 MonoInst *iargs [4];
2703 MonoMethod *memcpy_method;
2707 * This check breaks with spilled vars... need to handle it during verification anyway.
2708 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled size/alignment instead of the managed one. */
2712 n = mono_class_native_size (klass, &align);
2714 n = mono_class_value_size (klass, &align);
2716 /* if native is true there should be no references in the struct */
2717 if (cfg->gen_write_barriers && klass->has_references && !native) {
2718 /* Avoid barriers when storing to the stack */
2719 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2720 (dest->opcode == OP_LDADDR))) {
2721 int context_used = 0;
2726 if (cfg->generic_sharing_context)
2727 context_used = mono_class_check_context_used (klass);
2729 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2730 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2732 } else if (context_used) {
/* Shared code: load the class from the rgctx at runtime. */
2733 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2735 if (cfg->compile_aot) {
2736 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2738 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT case: make sure the GC descriptor is ready before mono_value_copy runs. */
2739 mono_class_compute_gc_descriptor (klass);
2743 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small copies are inlined, larger ones call memcpy. */
2748 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2749 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2750 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2754 EMIT_NEW_ICONST (cfg, iargs [2], n);
2756 memcpy_method = get_memcpy_method ();
2757 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the cached managed String.memset (ptr, val, size) helper from
 * corlib; aborts if corlib is too old to provide it.
 */
2762 get_memset_method (void)
2764 static MonoMethod *memset_method = NULL;
2765 if (!memset_method) {
2766 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2768 g_error ("Old corlib found. Install a new one");
2770 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg: inline memset for small sizes (<= 5 pointers), otherwise a call
 * to the managed memset helper.
 */
2774 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2776 MonoInst *iargs [3];
2779 MonoMethod *memset_method;
2781 /* FIXME: Optimize this for the case when dest is an LDADDR */
2783 mono_class_init (klass);
2784 n = mono_class_value_size (klass, &align);
2786 if (n <= sizeof (gpointer) * 5) {
2787 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2790 memset_method = get_memset_method ();
2792 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2793 EMIT_NEW_ICONST (cfg, iargs [2], n);
2794 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD, according to how
 * the context is shared (CONTEXT_USED): from the mrgctx variable for
 * method-inflated sharing, from the vtable variable for static/valuetype
 * methods (dereferencing the mrgctx's class_vtable when needed), or from
 * `this`'s vtable otherwise. Only valid under generic sharing.
 * NOTE(review): several lines (returns, braces) are elided in this dump.
 */
2799 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2801 MonoInst *this = NULL;
2803 g_assert (cfg->generic_sharing_context);
/* Load `this` only when the method actually has one to read the vtable from. */
2805 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2806 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2807 !method->klass->valuetype)
2808 EMIT_NEW_ARGLOAD (cfg, this, 0);
2810 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2811 MonoInst *mrgctx_loc, *mrgctx_var;
2814 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2816 mrgctx_loc = mono_get_vtable_var (cfg);
2817 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2820 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2821 MonoInst *vtable_loc, *vtable_var;
2825 vtable_loc = mono_get_vtable_var (cfg);
2826 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx: load the vtable out of it. */
2828 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2829 MonoInst *mrgctx_var = vtable_var;
2832 vtable_reg = alloc_preg (cfg);
2833 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2834 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from `this`. */
2840 int vtable_reg, res_reg;
2842 vtable_reg = alloc_preg (cfg);
2843 res_reg = alloc_preg (cfg);
2844 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) a MonoJumpInfoRgctxEntry describing an rgctx slot for
 * METHOD: IN_MRGCTX selects method vs class context, PATCH_TYPE/PATCH_DATA
 * describe the looked-up item, INFO_TYPE the kind of value to fetch.
 */
2849 static MonoJumpInfoRgctxEntry *
2850 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2852 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2853 res->method = method;
2854 res->in_mrgctx = in_mrgctx;
2855 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2856 res->data->type = patch_type;
2857 res->data->data.target = patch_data;
2858 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX and the
 * slot description ENTRY via an RGCTX_FETCH patch.
 */
2863 static inline MonoInst*
2864 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2866 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx property RGCTX_TYPE of KLASS via a lazy rgctx
 * fetch keyed on the current method's generic context.
 */
2870 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2871 MonoClass *klass, int rgctx_type)
2873 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2874 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2876 return emit_rgctx_fetch (cfg, rgctx, entry);
2880 * emit_get_rgctx_method:
2882 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2883 * normal constants, else emit a load from the rgctx.
2886 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2887 MonoMethod *cmethod, int rgctx_type)
2889 if (!context_used) {
/* Non-shared code: the method is known at compile time, emit a constant. */
2892 switch (rgctx_type) {
2893 case MONO_RGCTX_INFO_METHOD:
2894 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2896 case MONO_RGCTX_INFO_METHOD_RGCTX:
2897 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2900 g_assert_not_reached ();
/* Shared code: fetch the value from the rgctx at runtime. */
2903 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2904 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2906 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx property RGCTX_TYPE of FIELD via a lazy rgctx
 * fetch keyed on the current method's generic context.
 */
2911 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2912 MonoClassField *field, int rgctx_type)
2914 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2915 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2917 return emit_rgctx_fetch (cfg, rgctx, entry);
2921 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant. On
 * MONO_ARCH_VTABLE_REG architectures the vtable is passed in a fixed register.
 */
2924 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2926 MonoInst *vtable_arg;
2928 int context_used = 0;
2930 if (cfg->generic_sharing_context)
2931 context_used = mono_class_check_context_used (klass);
2934 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2935 klass, MONO_RGCTX_INFO_VTABLE);
2937 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2941 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a signature with an explicit vtable argument. */
2944 if (COMPILE_LLVM (cfg))
2945 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2947 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2948 #ifdef MONO_ARCH_VTABLE_REG
2949 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2950 cfg->uses_vtable_reg = TRUE;
2957 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise. Compares either the class
 * pointer (MONO_OPT_SHARED), an rgctx-loaded vtable (shared generic code),
 * or a constant vtable. The load of OBJ's vtable also performs the implicit
 * null check (LOAD_MEMBASE_FAULT).
 */
2960 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2962 int vtable_reg = alloc_preg (cfg);
2963 int context_used = 0;
2965 if (cfg->generic_sharing_context)
2966 context_used = mono_class_check_context_used (array_class);
2968 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2970 if (cfg->opt & MONO_OPT_SHARED) {
2971 int class_reg = alloc_preg (cfg);
2972 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2973 if (cfg->compile_aot) {
/* AOT cannot embed the class pointer directly; load it as a patchable const. */
2974 int klass_reg = alloc_preg (cfg);
2975 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2976 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2980 } else if (context_used) {
2981 MonoInst *vtable_ins;
2983 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2984 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2986 if (cfg->compile_aot) {
/* mono_class_vtable failure leaves the error on array_class for the caller. */
2990 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2992 vt_reg = alloc_preg (cfg);
2993 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2994 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2997 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3003 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (from OBJ_REG's
 * vtable) and the target KLASS into the thread's MonoJitTlsData so a failing
 * cast can report both types. No-op without better_cast_details; prints an
 * error when the TLS intrinsic is unavailable on this platform.
 */
3007 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3009 if (mini_get_debug_options ()->better_cast_details) {
3010 int to_klass_reg = alloc_preg (cfg);
3011 int vtable_reg = alloc_preg (cfg);
3012 int klass_reg = alloc_preg (cfg);
3013 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3016 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3020 MONO_ADD_INS (cfg->cbb, tls_get);
3021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3022 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3025 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3026 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the saved cast-failure details after a cast succeeds, so stale
 * data is not reported by a later failure. Counterpart of save_cast_details.
 */
3031 reset_cast_details (MonoCompile *cfg)
3033 /* Reset the variables holding the cast details */
3034 if (mini_get_debug_options ()->better_cast_details) {
3035 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3037 MONO_ADD_INS (cfg->cbb, tls_get);
3038 /* It is enough to reset the from field */
3039 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3044 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3045 * generic code is generated.
3048 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3050 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3053 MonoInst *rgctx, *addr;
3055 /* FIXME: What if the class is shared? We might not
3056 have to get the address of the method from the
/* Shared code: resolve Nullable<T>.Unbox's code address through the rgctx
 * and do an indirect (calli) call with the rgctx argument. */
3058 addr = emit_get_rgctx_method (cfg, context_used, method,
3059 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3061 rgctx = emit_get_rgctx (cfg, method, context_used);
3063 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: direct managed call. */
3065 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of sp[0] to valuetype KLASS: check the object is not an
 * array (rank 0) and that its element class matches KLASS (via an rgctx load
 * when context_used is set), throwing InvalidCastException on mismatch, then
 * return the address just past the MonoObject header.
 */
3070 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3074 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3075 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3076 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3077 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3079 obj_reg = sp [0]->dreg;
/* Faulting vtable load doubles as the null check. */
3080 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3081 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3083 /* FIXME: generics */
3084 g_assert (klass->rank == 0);
3087 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3088 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-resolved element class. */
3094 MonoInst *element_class;
3096 /* This assertion is from the unboxcast insn */
3097 g_assert (klass->rank == 0);
3099 element_class = emit_get_rgctx_klass (cfg, context_used,
3100 klass->element_class, MONO_RGCTX_INFO_KLASS);
3102 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3103 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3105 save_cast_details (cfg, klass->element_class, obj_reg);
3106 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3107 reset_cast_details (cfg);
/* Result: pointer to the boxed data, i.e. object + header size. */
3110 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3111 MONO_ADD_INS (cfg->cbb, add);
3112 add->type = STACK_MP;
3119 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of an instance of KLASS (FOR_BOX selects the boxing
 * allocator). Picks, in order: an rgctx-based shared-code path, the generic
 * mono_object_new under MONO_OPT_SHARED, a corlib-token helper in AOT
 * out-of-line blocks, a GC managed allocator when available, or the class's
 * allocation function (with an extra length word when pass_lw is set).
 * NOTE(review): several branches/returns are elided in this dump.
 */
3122 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3124 MonoInst *iargs [2];
3130 MonoInst *iargs [2];
3133 FIXME: we cannot get managed_alloc here because we can't get
3134 the class's vtable (because it's not a closed class)
3136 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3137 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared code: fetch the klass (SHARED opt) or vtable from the rgctx. */
3140 if (cfg->opt & MONO_OPT_SHARED)
3141 rgctx_info = MONO_RGCTX_INFO_KLASS;
3143 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3144 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3146 if (cfg->opt & MONO_OPT_SHARED) {
3147 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3149 alloc_ftn = mono_object_new;
3152 alloc_ftn = mono_object_new_specific;
3155 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3158 if (cfg->opt & MONO_OPT_SHARED) {
3159 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3160 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3162 alloc_ftn = mono_object_new;
3163 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3164 /* This happens often in argument checking code, eg. throw new FooException... */
3165 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3166 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3167 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3169 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3170 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3174 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3175 cfg->exception_ptr = klass;
3179 #ifndef MONO_CROSS_COMPILE
3180 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3183 if (managed_alloc) {
3184 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3185 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3187 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer words first. */
3189 guint32 lw = vtable->klass->instance_size;
3190 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3191 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3192 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3195 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3199 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3203 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the boxing of VAL to KLASS. Nullable<T> is boxed by calling its
 * managed Box method (through the rgctx in shared code); other valuetypes
 * are boxed by allocating the object and storing VAL past the header.
 */
3206 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3208 MonoInst *alloc, *ins;
3210 if (mono_class_is_nullable (klass)) {
3211 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3214 /* FIXME: What if the class is shared? We might not
3215 have to get the method address from the RGCTX. */
3216 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3217 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3218 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3220 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3222 return mono_emit_method_call (cfg, method, &val, NULL);
3226 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the object header. */
3230 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3235 // FIXME: This doesn't work yet (class libs tests fail?)
/* Forced TRUE for now (see FIXME): every isinst/castclass takes the icall
 * path. The intended fast-path exclusions are interfaces, arrays, nullables,
 * MBR classes, sealed classes, variant generics and type variables. */
3236 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3239 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit a castclass of SRC to KLASS: complex cases (currently all, since
 * is_complex_isinst is forced TRUE) go through the mono_object_castclass
 * icall; the inline path null-checks, then does an interface check, a
 * sealed-class vtable/klass compare, or a full class-hierarchy check,
 * throwing InvalidCastException on failure.
 */
3242 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3244 MonoBasicBlock *is_null_bb;
3245 int obj_reg = src->dreg;
3246 int vtable_reg = alloc_preg (cfg);
3247 MonoInst *klass_inst = NULL;
3252 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3253 klass, MONO_RGCTX_INFO_KLASS);
3255 if (is_complex_isinst (klass)) {
3256 /* Complex case, handle by an icall */
3262 args [1] = klass_inst;
3264 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3266 /* Simple case, handled by the code below */
/* null objects always pass a castclass. */
3270 NEW_BBLOCK (cfg, is_null_bb);
3272 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3273 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3275 save_cast_details (cfg, klass, obj_reg);
3277 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3278 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3279 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3281 int klass_reg = alloc_preg (cfg);
3283 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3285 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3286 /* the remoting code is broken, access the class for now */
3287 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3288 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3290 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3291 cfg->exception_ptr = klass;
3294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: an exact klass compare is sufficient. */
3296 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3299 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3302 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3306 MONO_START_BB (cfg, is_null_bb);
3308 reset_cast_details (cfg);
3314 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit an isinst test of SRC against KLASS, producing SRC on success and
 * NULL on failure. Complex cases (currently all, since is_complex_isinst is
 * forced TRUE) call the mono_object_isinst icall. The inline path copies the
 * input to the result register up front (so the false path only has to zero
 * it), null-checks, then dispatches on the kind of KLASS: interface, array
 * (with special enum/object element-class handling and a vector check for
 * SZARRAY), nullable, sealed class, or general class hierarchy.
 */
3317 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3320 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3321 int obj_reg = src->dreg;
3322 int vtable_reg = alloc_preg (cfg);
3323 int res_reg = alloc_preg (cfg);
3324 MonoInst *klass_inst = NULL;
3327 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3329 if (is_complex_isinst (klass)) {
3332 /* Complex case, handle by an icall */
3338 args [1] = klass_inst;
3340 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3342 /* Simple case, the code below can handle it */
3346 NEW_BBLOCK (cfg, is_null_bb);
3347 NEW_BBLOCK (cfg, false_bb);
3348 NEW_BBLOCK (cfg, end_bb);
3350 /* Do the assignment at the beginning, so the other assignment can be if converted */
3351 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3352 ins->type = STACK_OBJ;
/* null is not an instance of anything; it also yields null, so jump to the
 * pass-through block. */
3355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3356 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3360 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3361 g_assert (!context_used);
3362 /* the is_null_bb target simply copies the input register to the output */
3363 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3365 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then the element (cast) class is compared. */
3368 int rank_reg = alloc_preg (cfg);
3369 int eclass_reg = alloc_preg (cfg);
3371 g_assert (!context_used);
3372 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3373 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3377 if (klass->cast_class == mono_defaults.object_class) {
3378 int parent_reg = alloc_preg (cfg);
3379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3380 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3381 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3383 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3384 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3385 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3387 } else if (klass->cast_class == mono_defaults.enum_class) {
3388 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3389 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3390 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3391 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3393 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3394 /* Check that the object is a vector too */
3395 int bounds_reg = alloc_preg (cfg);
3396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3401 /* the is_null_bb target simply copies the input register to the output */
3402 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3404 } else if (mono_class_is_nullable (klass)) {
3405 g_assert (!context_used);
3406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3407 /* the is_null_bb target simply copies the input register to the output */
3408 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3410 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3411 g_assert (!context_used);
3412 /* the remoting code is broken, access the class for now */
3413 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3414 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3416 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3417 cfg->exception_ptr = klass;
3420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: exact klass compare decides the result. */
3422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3423 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3429 /* the is_null_bb target simply copies the input register to the output */
3430 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path zeroes the result; success/null path keeps the input copy. */
3435 MONO_START_BB (cfg, false_bb);
3437 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3440 MONO_START_BB (cfg, is_null_bb);
3442 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Implements the CEE_MONO_CISINST opcode (see comment below for the 0/1/2
 * result encoding). Handles interfaces and plain classes, with special paths
 * for transparent proxies: a proxy with custom type info whose type cannot be
 * checked at JIT time yields 2 so the caller can fall back to a runtime check.
 */
3448 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3450 /* This opcode takes as input an object reference and a class, and returns:
3451 0) if the object is an instance of the class,
3452 1) if the object is not instance of the class,
3453 2) if the object is a proxy whose type cannot be determined */
3456 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3457 int obj_reg = src->dreg;
3458 int dreg = alloc_ireg (cfg);
3460 int klass_reg = alloc_preg (cfg);
3462 NEW_BBLOCK (cfg, true_bb);
3463 NEW_BBLOCK (cfg, false_bb);
3464 NEW_BBLOCK (cfg, false2_bb);
3465 NEW_BBLOCK (cfg, end_bb);
3466 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
3468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3469 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3471 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3472 NEW_BBLOCK (cfg, interface_fail_bb);
3474 tmp_reg = alloc_preg (cfg);
3475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3476 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: result 1 unless it is a proxy with custom
 * type info, in which case result 2. */
3477 MONO_START_BB (cfg, interface_fail_bb);
3478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3480 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3482 tmp_reg = alloc_preg (cfg);
3483 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: for proxies check the remote class's proxy_class,
 * bailing to result 2 when custom type info prevents a JIT-time answer. */
3487 tmp_reg = alloc_preg (cfg);
3488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3491 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3492 tmp_reg = alloc_preg (cfg);
3493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3496 tmp_reg = alloc_preg (cfg);
3497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3499 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3501 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3504 MONO_START_BB (cfg, no_proxy_bb);
3506 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the 0/1/2 result in dreg. */
3509 MONO_START_BB (cfg, false_bb);
3511 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3514 MONO_START_BB (cfg, false2_bb);
3516 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3519 MONO_START_BB (cfg, true_bb);
3521 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3523 MONO_START_BB (cfg, end_bb);
/* Result instruction carried back to the caller as an I4. */
3526 MONO_INST_NEW (cfg, ins, OP_ICONST);
3528 ins->type = STACK_I4;
3534 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3536 /* This opcode takes as input an object reference and a class, and returns:
3537 0) if the object is an instance of the class,
3538 1) if the object is a proxy whose type cannot be determined
3539 an InvalidCastException exception is thrown otherwhise*/
3542 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3543 int obj_reg = src->dreg;
3544 int dreg = alloc_ireg (cfg);
3545 int tmp_reg = alloc_preg (cfg);
3546 int klass_reg = alloc_preg (cfg);
3548 NEW_BBLOCK (cfg, end_bb);
3549 NEW_BBLOCK (cfg, ok_result_bb);
3551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3554 save_cast_details (cfg, klass, obj_reg);
3556 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3557 NEW_BBLOCK (cfg, interface_fail_bb);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3560 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3561 MONO_START_BB (cfg, interface_fail_bb);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3564 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3566 tmp_reg = alloc_preg (cfg);
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3569 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3571 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3575 NEW_BBLOCK (cfg, no_proxy_bb);
3577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3579 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3581 tmp_reg = alloc_preg (cfg);
3582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3585 tmp_reg = alloc_preg (cfg);
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3590 NEW_BBLOCK (cfg, fail_1_bb);
3592 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3594 MONO_START_BB (cfg, fail_1_bb);
3596 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3599 MONO_START_BB (cfg, no_proxy_bb);
3601 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3604 MONO_START_BB (cfg, ok_result_bb);
3606 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3608 MONO_START_BB (cfg, end_bb);
3611 MONO_INST_NEW (cfg, ins, OP_ICONST);
3613 ins->type = STACK_I4;
3619 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate of type KLASS bound to METHOD with the given TARGET
 * object, inlining the work normally done by mono_delegate_ctor (): the
 * target, method, method_code and invoke_impl fields are stored directly.
 * CONTEXT_USED is forwarded to the rgctx helpers for generic sharing.
 */
3621 static G_GNUC_UNUSED MonoInst*
3622 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3624 gpointer *trampoline;
3625 MonoInst *obj, *method_ins, *tramp_ins;
3629 obj = handle_alloc (cfg, klass, FALSE, 0);
3633 /* Inline the contents of mono_delegate_ctor */
3635 /* Set target field */
3636 /* Optimize away setting of NULL target */
3637 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3638 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3640 /* Set method field */
3641 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3642 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3645 * To avoid looking up the compiled code belonging to the target method
3646 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3647 * store it, and we fill it after the method has been compiled.
3649 if (!cfg->compile_aot && !method->dynamic) {
3650 MonoInst *code_slot_ins;
/* NOTE(review): in the shared-generics case the slot comes from the rgctx;
 * otherwise it is looked up/created in the per-domain hash below. */
3653 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3655 domain = mono_domain_get ();
/* The method_code_hash is shared domain state; guard with the domain lock. */
3656 mono_domain_lock (domain);
3657 if (!domain_jit_info (domain)->method_code_hash)
3658 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3659 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3661 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3662 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3664 mono_domain_unlock (domain);
3666 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3671 /* Set invoke_impl field */
3672 if (cfg->compile_aot) {
/* AOT: the trampoline address is only known at load time, emit a patch. */
3673 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3675 trampoline = mono_create_delegate_trampoline (klass);
3676 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3680 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of the given RANK; SP holds the dimension/bound
 * arguments. Returns the call instruction.
 */
3686 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3688 MonoJitICallInfo *info;
3690 /* Need to register the icall so it gets an icall wrapper */
3691 info = mono_get_array_new_va_icall (rank);
3693 cfg->flags |= MONO_CFG_HAS_VARARGS;
3695 /* mono_array_new_va () needs a vararg calling convention */
3696 cfg->disable_llvm = TRUE;
3698 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3699 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block, once per method. No-op when the method has no got_var
 * or it was already allocated.
 */
3703 mono_emit_load_got_addr (MonoCompile *cfg)
3705 MonoInst *getaddr, *dummy_use;
3707 if (!cfg->got_var || cfg->got_var_allocated)
3710 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3711 getaddr->dreg = cfg->got_var->dreg;
3713 /* Add it to the start of the first bblock */
3714 if (cfg->bb_entry->code) {
/* Prepend: the GOT address must be available before any other code runs. */
3715 getaddr->next = cfg->bb_entry->code;
3716 cfg->bb_entry->code = getaddr;
3719 MONO_ADD_INS (cfg->bb_entry, getaddr);
3721 cfg->got_var_allocated = TRUE;
3724 * Add a dummy use to keep the got_var alive, since real uses might
3725 * only be generated by the back ends.
3726 * Add it to end_bblock, so the variable's lifetime covers the whole
3728 * It would be better to make the usage of the got var explicit in all
3729 * cases when the backend needs it (i.e. calls, throw etc.), so this
3730 * wouldn't be needed.
3732 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3733 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached value of the MONO_INLINELIMIT environment variable (IL size cap). */
3736 static int inline_limit;
3737 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Checks generic sharing, inline depth, method/class
 * attributes, IL size against the (env-configurable) limit, class cctor
 * state and declarative security. Returns TRUE only when every check passes
 * (several early-return lines are not visible in this extraction).
 */
3740 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3742 MonoMethodHeaderSummary header;
3744 #ifdef MONO_ARCH_SOFT_FLOAT
3745 MonoMethodSignature *sig = mono_method_signature (method);
/* Shared generic code is not inlined. */
3749 if (cfg->generic_sharing_context)
/* Cap recursion of inlining itself. */
3752 if (cfg->inline_depth > 10)
3755 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* icalls/pinvokes can be "inlined" as LMF ops on some architectures. */
3756 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3757 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3758 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3763 if (!mono_method_get_header_summary (method, &header))
3766 /*runtime, icall and pinvoke are checked by summary call*/
3767 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3768 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3769 (method->klass->marshalbyref) ||
3773 /* also consider num_locals? */
3774 /* Do the size check early to avoid creating vtables */
3775 if (!inline_limit_inited) {
3776 if (getenv ("MONO_INLINELIMIT"))
3777 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3779 inline_limit = INLINE_LENGTH_LIMIT;
3780 inline_limit_inited = TRUE;
3782 if (header.code_size >= inline_limit)
3786 * if we can initialize the class of the method right away, we do,
3787 * otherwise we don't allow inlining if the class needs initialization,
3788 * since it would mean inserting a call to mono_runtime_class_init()
3789 * inside the inlined code
3791 if (!(cfg->opt & MONO_OPT_SHARED)) {
3792 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3793 if (cfg->run_cctors && method->klass->has_cctor) {
3794 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3795 if (!method->klass->runtime_info)
3796 /* No vtable created yet */
3798 vtable = mono_class_vtable (cfg->domain, method->klass);
3801 /* This makes so that inline cannot trigger */
3802 /* .cctors: too many apps depend on them */
3803 /* running with a specific order... */
3804 if (! vtable->initialized)
3806 mono_runtime_class_init (vtable);
3808 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3809 if (!method->klass->runtime_info)
3810 /* No vtable created yet */
3812 vtable = mono_class_vtable (cfg->domain, method->klass);
3815 if (!vtable->initialized)
3820 * If we're compiling for shared code
3821 * the cctor will need to be run at aot method load time, for example,
3822 * or at the end of the compilation of the inlining method.
3824 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3829 * CAS - do not inline methods with declarative security
3830 * Note: this has to be before any possible return TRUE;
3832 if (mono_method_has_declsec (method))
3835 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4. */
3837 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3839 for (i = 0; i < sig->param_count; ++i)
3840 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class. The guard conditions are
 * visible below; their return statements are not part of this extraction.
 */
3848 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized (JIT case): nothing to do. */
3850 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow lazy initialization. */
3853 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3856 if (!mono_class_needs_cctor_run (vtable->klass, method))
3859 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3860 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have type KLASS. A bounds check is emitted when
 * BCHECK is set. Uses an x86 LEA fast path for power-of-two element sizes;
 * otherwise multiply + add. Returns the address instruction (STACK_PTR).
 */
3867 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3871 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3873 mono_class_init (klass);
3874 size = mono_class_array_element_size (klass);
3876 mult_reg = alloc_preg (cfg);
3877 array_reg = arr->dreg;
3878 index_reg = index->dreg;
3880 #if SIZEOF_REGISTER == 8
3881 /* The array reg is 64 bits but the index reg is only 32 */
3882 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
3884 index2_reg = index_reg;
3886 index2_reg = alloc_preg (cfg);
3887 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit registers: narrow a 64-bit index to 32 bits. */
3890 if (index->type == STACK_I8) {
3891 index2_reg = alloc_preg (cfg);
3892 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3894 index2_reg = index_reg;
3899 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3901 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3902 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 [size] is the LEA shift amount; -1 entries are unused sizes. */
3903 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3905 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3906 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3912 add_reg = alloc_preg (cfg);
3914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3915 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3916 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3917 ins->type = STACK_PTR;
3918 MONO_ADD_INS (cfg->cbb, ins);
3923 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 array access
 * arr [i, j]: both indexes are range-checked against the per-dimension
 * bounds, then the flat offset (realidx1 * len2 + realidx2) * size is added
 * to the vector start. Only built when the target has native mul/div.
 */
3925 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3927 int bounds_reg = alloc_preg (cfg);
3928 int add_reg = alloc_preg (cfg);
3929 int mult_reg = alloc_preg (cfg);
3930 int mult2_reg = alloc_preg (cfg);
3931 int low1_reg = alloc_preg (cfg);
3932 int low2_reg = alloc_preg (cfg);
3933 int high1_reg = alloc_preg (cfg);
3934 int high2_reg = alloc_preg (cfg);
3935 int realidx1_reg = alloc_preg (cfg);
3936 int realidx2_reg = alloc_preg (cfg);
3937 int sum_reg = alloc_preg (cfg);
3942 mono_class_init (klass);
3943 size = mono_class_array_element_size (klass);
3945 index1 = index_ins1->dreg;
3946 index2 = index_ins2->dreg;
3948 /* range checking */
3949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3950 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned compare against
 * length also catches negative results. */
3952 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3953 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3954 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3956 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3957 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3958 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds [1] sits one MonoArrayBounds past bounds [0]. */
3960 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3961 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3962 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3964 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3965 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3966 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + vector offset */
3968 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3969 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3971 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3972 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3974 ins->type = STACK_MP;
3976 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the Address () method of a
 * multi-dimensional array CMETHOD. Fast paths handle rank 1 and (with the
 * INTRINS optimization) rank 2; everything else goes through the generic
 * marshalled Address wrapper.
 */
3983 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3987 MonoMethod *addr_method;
/* A setter carries the value as its last parameter; it is not an index. */
3990 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank-1 fast path (the guarding rank check is not visible here). */
3993 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3995 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3996 /* emit_ldelema_2 depends on OP_LMUL */
3997 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3998 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic path: call the marshal-generated Address helper. */
4002 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4003 addr_method = mono_marshal_get_array_address (rank, element_size);
4004 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint, regardless of the method. */
4009 static MonoBreakPolicy
4010 always_insert_breakpoint (MonoMethod *method)
4012 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4015 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4018 * mono_set_break_policy:
4019 * policy_callback: the new callback function
4021 * Allow embedders to decide wherther to actually obey breakpoint instructions
4022 * (both break IL instructions and Debugger.Break () method calls), for example
4023 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4024 * untrusted or semi-trusted code.
4026 * @policy_callback will be called every time a break point instruction needs to
4027 * be inserted with the method argument being the method that calls Debugger.Break()
4028 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4029 * if it wants the breakpoint to not be effective in the given method.
4030 * #MONO_BREAK_POLICY_ALWAYS is the default.
4033 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4035 if (policy_callback)
4036 break_policy_func = policy_callback;
4038 break_policy_func = always_insert_breakpoint;
4042 should_insert_brekpoint (MonoMethod *method) {
4043 switch (break_policy_func (method)) {
4044 case MONO_BREAK_POLICY_ALWAYS:
4046 case MONO_BREAK_POLICY_NEVER:
4048 case MONO_BREAK_POLICY_ON_DBG:
4049 return mono_debug_using_mono_debugger ();
4051 g_warning ("Incorrect value returned from break policy callback");
4056 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address for
 * args [0][args [1]] and copy a value of the element type between that slot
 * and the by-ref location args [2]. IS_SET selects the copy direction
 * (the if/else around the two copies is not visible in this extraction).
 */
4058 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4060 MonoInst *addr, *store, *load;
4061 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4063 /* the bounds check is already done by the callers */
4064 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *element = *args [2] */
4066 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4067 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get: *args [2] = *element */
4069 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4070 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence; currently only SIMD constructors are handled. Returns the
 * emitted instruction or NULL when no intrinsic applies.
 */
4076 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4078 MonoInst *ins = NULL;
4079 #ifdef MONO_ARCH_SIMD_INTRINSICS
4080 if (cfg->opt & MONO_OPT_SIMD) {
4081 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with an inline intrinsic IR sequence
 * (string accessors, Object.GetType, array length/rank, RuntimeHelpers,
 * Thread/Monitor/Interlocked operations, Debugger.Break, Math, SIMD).
 * Returns the resulting instruction, or NULL to fall back to a normal call.
 * Dispatch falls through to mono_arch_emit_inst_for_method () at the end.
 */
4091 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4093 MonoInst *ins = NULL;
/* Lazily resolved once; RuntimeHelpers has no mono_defaults entry. */
4095 static MonoClass *runtime_helpers_class = NULL;
4096 if (! runtime_helpers_class)
4097 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4098 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4100 if (cmethod->klass == mono_defaults.string_class) {
4101 if (strcmp (cmethod->name, "get_Chars") == 0) {
4102 int dreg = alloc_ireg (cfg);
4103 int index_reg = alloc_preg (cfg);
4104 int mult_reg = alloc_preg (cfg);
4105 int add_reg = alloc_preg (cfg);
4107 #if SIZEOF_REGISTER == 8
4108 /* The array reg is 64 bits but the index reg is only 32 */
4109 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4111 index_reg = args [1]->dreg;
4113 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4115 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* LEA fast path: chars are 2 bytes, shift by 1. */
4116 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4117 add_reg = ins->dreg;
4118 /* Avoid a warning */
4120 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4124 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4125 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4126 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4128 type_from_op (ins, NULL, NULL);
4130 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4131 int dreg = alloc_ireg (cfg);
4132 /* Decompose later to allow more optimizations */
4133 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4134 ins->type = STACK_I4;
4135 ins->flags |= MONO_INST_FAULT;
4136 cfg->cbb->has_array_access = TRUE;
4137 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4140 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4141 int mult_reg = alloc_preg (cfg);
4142 int add_reg = alloc_preg (cfg);
4144 /* The corlib functions check for oob already. */
4145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4146 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4148 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4151 } else if (cmethod->klass == mono_defaults.object_class) {
4153 if (strcmp (cmethod->name, "GetType") == 0) {
4154 int dreg = alloc_preg (cfg);
4155 int vt_reg = alloc_preg (cfg);
4156 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4157 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4158 type_from_op (ins, NULL, NULL);
4161 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash the object address; only valid with a non-moving GC. */
4162 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4163 int dreg = alloc_ireg (cfg);
4164 int t1 = alloc_ireg (cfg);
4166 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4167 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4168 ins->type = STACK_I4;
/* Object..ctor is empty: emit a nop. */
4172 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4173 MONO_INST_NEW (cfg, ins, OP_NOP);
4174 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4178 } else if (cmethod->klass == mono_defaults.array_class) {
4179 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4180 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4182 #ifndef MONO_BIG_ARRAYS
4184 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4187 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4188 int dreg = alloc_ireg (cfg);
4189 int bounds_reg = alloc_ireg (cfg);
4190 MonoBasicBlock *end_bb, *szarray_bb;
4191 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4193 NEW_BBLOCK (cfg, end_bb);
4194 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer identifies a szarray (vector). */
4196 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4197 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4198 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4199 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4200 /* Non-szarray case */
4202 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4203 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4205 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4206 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4208 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0. */
4211 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4212 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4214 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4215 MONO_START_BB (cfg, end_bb);
4217 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4218 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
4224 if (cmethod->name [0] != 'g')
4227 if (strcmp (cmethod->name, "get_Rank") == 0) {
4228 int dreg = alloc_ireg (cfg);
4229 int vtable_reg = alloc_preg (cfg);
4230 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4231 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4232 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4233 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4234 type_from_op (ins, NULL, NULL);
4237 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4238 int dreg = alloc_ireg (cfg);
4240 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4241 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4242 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4247 } else if (cmethod->klass == runtime_helpers_class) {
4249 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4250 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4254 } else if (cmethod->klass == mono_defaults.thread_class) {
4255 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4256 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4257 MONO_ADD_INS (cfg->cbb, ins);
4259 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4260 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4261 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4264 } else if (cmethod->klass == mono_defaults.monitor_class) {
4265 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4266 /* The trampolines don't work under SGEN */
4267 gboolean is_moving_gc = mono_gc_is_moving ();
4269 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4272 if (COMPILE_LLVM (cfg)) {
4274 * Pass the argument normally, the LLVM backend will handle the
4275 * calling convention problems.
4277 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the trampoline takes the object in a fixed register. */
4279 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4280 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4281 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4282 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4285 return (MonoInst*)call;
4286 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4289 if (COMPILE_LLVM (cfg)) {
4290 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4292 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4293 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4294 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4295 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4298 return (MonoInst*)call;
4300 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4301 MonoMethod *fast_method = NULL;
4303 /* Avoid infinite recursion */
4304 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4305 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4306 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4309 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4310 strcmp (cmethod->name, "Exit") == 0)
4311 fast_method = mono_monitor_get_fast_path (cmethod);
4315 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4317 } else if (cmethod->klass->image == mono_defaults.corlib &&
4318 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4319 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4322 #if SIZEOF_REGISTER == 8
4323 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4324 /* 64 bit reads are already atomic */
4325 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4326 ins->dreg = mono_alloc_preg (cfg);
4327 ins->inst_basereg = args [0]->dreg;
4328 ins->inst_offset = 0;
4329 MONO_ADD_INS (cfg->cbb, ins);
4333 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic add of +1/-1. */
4334 if (strcmp (cmethod->name, "Increment") == 0) {
4335 MonoInst *ins_iconst;
4338 if (fsig->params [0]->type == MONO_TYPE_I4)
4339 opcode = OP_ATOMIC_ADD_NEW_I4;
4340 #if SIZEOF_REGISTER == 8
4341 else if (fsig->params [0]->type == MONO_TYPE_I8)
4342 opcode = OP_ATOMIC_ADD_NEW_I8;
4345 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4346 ins_iconst->inst_c0 = 1;
4347 ins_iconst->dreg = mono_alloc_ireg (cfg);
4348 MONO_ADD_INS (cfg->cbb, ins_iconst);
4350 MONO_INST_NEW (cfg, ins, opcode);
4351 ins->dreg = mono_alloc_ireg (cfg);
4352 ins->inst_basereg = args [0]->dreg;
4353 ins->inst_offset = 0;
4354 ins->sreg2 = ins_iconst->dreg;
4355 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4356 MONO_ADD_INS (cfg->cbb, ins);
4358 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4359 MonoInst *ins_iconst;
4362 if (fsig->params [0]->type == MONO_TYPE_I4)
4363 opcode = OP_ATOMIC_ADD_NEW_I4;
4364 #if SIZEOF_REGISTER == 8
4365 else if (fsig->params [0]->type == MONO_TYPE_I8)
4366 opcode = OP_ATOMIC_ADD_NEW_I8;
4369 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4370 ins_iconst->inst_c0 = -1;
4371 ins_iconst->dreg = mono_alloc_ireg (cfg);
4372 MONO_ADD_INS (cfg->cbb, ins_iconst);
4374 MONO_INST_NEW (cfg, ins, opcode);
4375 ins->dreg = mono_alloc_ireg (cfg);
4376 ins->inst_basereg = args [0]->dreg;
4377 ins->inst_offset = 0;
4378 ins->sreg2 = ins_iconst->dreg;
4379 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4380 MONO_ADD_INS (cfg->cbb, ins);
4382 } else if (strcmp (cmethod->name, "Add") == 0) {
4385 if (fsig->params [0]->type == MONO_TYPE_I4)
4386 opcode = OP_ATOMIC_ADD_NEW_I4;
4387 #if SIZEOF_REGISTER == 8
4388 else if (fsig->params [0]->type == MONO_TYPE_I8)
4389 opcode = OP_ATOMIC_ADD_NEW_I8;
4393 MONO_INST_NEW (cfg, ins, opcode);
4394 ins->dreg = mono_alloc_ireg (cfg);
4395 ins->inst_basereg = args [0]->dreg;
4396 ins->inst_offset = 0;
4397 ins->sreg2 = args [1]->dreg;
4398 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4399 MONO_ADD_INS (cfg->cbb, ins);
4402 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4404 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4405 if (strcmp (cmethod->name, "Exchange") == 0) {
4407 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4409 if (fsig->params [0]->type == MONO_TYPE_I4)
4410 opcode = OP_ATOMIC_EXCHANGE_I4;
4411 #if SIZEOF_REGISTER == 8
4412 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4413 (fsig->params [0]->type == MONO_TYPE_I))
4414 opcode = OP_ATOMIC_EXCHANGE_I8;
4416 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4417 opcode = OP_ATOMIC_EXCHANGE_I4;
4422 MONO_INST_NEW (cfg, ins, opcode);
4423 ins->dreg = mono_alloc_ireg (cfg);
4424 ins->inst_basereg = args [0]->dreg;
4425 ins->inst_offset = 0;
4426 ins->sreg2 = args [1]->dreg;
4427 MONO_ADD_INS (cfg->cbb, ins);
4429 switch (fsig->params [0]->type) {
4431 ins->type = STACK_I4;
4435 ins->type = STACK_I8;
4437 case MONO_TYPE_OBJECT:
4438 ins->type = STACK_OBJ;
4441 g_assert_not_reached ();
/* Storing a reference needs a write barrier with a generational GC. */
4444 if (cfg->gen_write_barriers && is_ref)
4445 emit_write_barrier (cfg, args [0], args [1], -1);
4447 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4449 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4450 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4452 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4453 if (fsig->params [1]->type == MONO_TYPE_I4)
4455 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4456 size = sizeof (gpointer);
4457 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4460 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4461 ins->dreg = alloc_ireg (cfg);
4462 ins->sreg1 = args [0]->dreg;
4463 ins->sreg2 = args [1]->dreg;
4464 ins->sreg3 = args [2]->dreg;
4465 ins->type = STACK_I4;
4466 MONO_ADD_INS (cfg->cbb, ins);
4467 } else if (size == 8) {
4468 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4469 ins->dreg = alloc_ireg (cfg);
4470 ins->sreg1 = args [0]->dreg;
4471 ins->sreg2 = args [1]->dreg;
4472 ins->sreg3 = args [2]->dreg;
4473 ins->type = STACK_I8;
4474 MONO_ADD_INS (cfg->cbb, ins);
4476 /* g_assert_not_reached (); */
4478 if (cfg->gen_write_barriers && is_ref)
4479 emit_write_barrier (cfg, args [0], args [1], -1);
4481 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib: Debugger.Break, Environment ---- */
4485 } else if (cmethod->klass->image == mono_defaults.corlib) {
4486 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4487 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4488 if (should_insert_brekpoint (cfg->method))
4489 MONO_INST_NEW (cfg, ins, OP_BREAK);
4491 MONO_INST_NEW (cfg, ins, OP_NOP);
4492 MONO_ADD_INS (cfg->cbb, ins);
4495 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4496 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Fold to a compile-time constant depending on the target platform. */
4498 EMIT_NEW_ICONST (cfg, ins, 1);
4500 EMIT_NEW_ICONST (cfg, ins, 0);
4504 } else if (cmethod->klass == mono_defaults.math_class) {
4506 * There is general branches code for Min/Max, but it does not work for
4508 * http://everything2.com/?node_id=1051618
4512 #ifdef MONO_ARCH_SIMD_INTRINSICS
4513 if (cfg->opt & MONO_OPT_SIMD) {
4514 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally let the architecture backend try. */
4520 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4524 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime method calls to faster replacements; currently
 * only String.InternalAllocateStr is rerouted to the GC's managed allocator
 * (when profiling allocations and shared-code mode are off). Returns the
 * replacement call instruction, or falls through when no redirect applies.
 */
4527 inline static MonoInst*
4528 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4529 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4531 if (method->klass == mono_defaults.string_class) {
4532 /* managed string allocation support */
4533 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4534 MonoInst *iargs [2];
4535 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4536 MonoMethod *managed_alloc = NULL;
4538 g_assert (vtable); /* Should not fail since it is System.String */
4539 #ifndef MONO_CROSS_COMPILE
4540 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4544 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4545 iargs [1] = args [0];
4546 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Used when inlining: create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this' when sig->hasthis) and emit stores of the
 * caller-provided stack values SP into them, so the inlined body can address
 * them like ordinary arguments through cfg->args [].
 */
4553 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4555 MonoInst *store, *temp;
4558 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for the 'this' slot the declared type is taken from the stack value itself */
4559 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4562 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4563 * would be different than the MonoInst's used to represent arguments, and
4564 * the ldelema implementation can't deal with that.
4565 * Solution: When ldelema is used on an inline argument, create a var for
4566 * it, emit ldelema on that var, and emit the saving code below in
4567 * inline_method () if needed.
4569 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4570 cfg->args [i] = temp;
4571 /* This uses cfg->args [i] which is set by the preceding line */
4572 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* keep the original IL position for debugging/sequence-point purposes */
4573 store->cil_code = sp [0]->cil_code;
4578 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4579 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4581 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging filter: only permit inlining of callees whose full name begins
 * with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The variable is read once and cached in a static;
 * an unset variable is cached as "" which disables the filter.
 */
4583 check_inline_called_method_name_limit (MonoMethod *called_method)
4586 static char *limit = NULL;
4588 if (limit == NULL) {
4589 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4591 if (limit_string != NULL)
4592 limit = limit_string;
4594 limit = (char *) "";
4597 if (limit [0] != '\0') {
4598 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match: compare only the first strlen (limit) characters */
4600 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4601 g_free (called_method_name);
4603 //return (strncmp_result <= 0);
4604 return (strncmp_result == 0);
4611 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the caller:
 * only permit inlining when the caller's full name begins with the prefix in
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  The value
 * is read once and cached in a static; unset is cached as "" (filter off).
 */
4613 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4616 static char *limit = NULL;
4618 if (limit == NULL) {
4619 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4620 if (limit_string != NULL) {
4621 limit = limit_string;
4623 limit = (char *) "";
4627 if (limit [0] != '\0') {
4628 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match: compare only the first strlen (limit) characters */
4630 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4631 g_free (caller_method_name);
4633 //return (strncmp_result <= 0);
4634 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that describe the method being compiled, recursively invokes
 * mono_method_to_ir () on the callee body between fresh start/end bblocks,
 * restores CFG, and then either commits the inlined blocks (linking and
 * merging them into the CFG) or discards them when the callee was too costly
 * or failed to convert.  INLINE_ALLWAYS forces the inline regardless of cost.
 * NOTE(review): the return value and several branch bodies fall outside this
 * excerpt — confirm details against the full source.
 */
4642 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4643 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4645 MonoInst *ins, *rvar = NULL;
4646 MonoMethodHeader *cheader;
4647 MonoBasicBlock *ebblock, *sbblock;
4649 MonoMethod *prev_inlined_method;
4650 MonoInst **prev_locals, **prev_args;
4651 MonoType **prev_arg_types;
4652 guint prev_real_offset;
4653 GHashTable *prev_cbb_hash;
4654 MonoBasicBlock **prev_cil_offset_to_bb;
4655 MonoBasicBlock *prev_cbb;
4656 unsigned char* prev_cil_start;
4657 guint32 prev_cil_offset_to_bb_len;
4658 MonoMethod *prev_current_method;
4659 MonoGenericContext *prev_generic_context;
4660 gboolean ret_var_set, prev_ret_var_set;
4662 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var based name filters, for debugging inlining problems */
4664 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4665 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4668 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4669 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4673 if (cfg->verbose_level > 2)
4674 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4676 if (!cmethod->inline_info) {
4677 mono_jit_stats.inlineable_methods++;
4678 cmethod->inline_info = 1;
4681 /* allocate local variables */
4682 cheader = mono_method_get_header (cmethod);
4684 if (cheader == NULL || mono_loader_get_last_error ()) {
/* header load failed: free any partial header and clear the loader error */
4686 mono_metadata_free_mh (cheader);
4687 mono_loader_clear_error ();
4691 /* allocate space to store the return value */
4692 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4693 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in the callee's locals; the caller's are restored below */
4697 prev_locals = cfg->locals;
4698 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4699 for (i = 0; i < cheader->num_locals; ++i)
4700 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4702 /* allocate start and end blocks */
4703 /* This is needed so if the inline is aborted, we can clean up */
4704 NEW_BBLOCK (cfg, sbblock);
4705 sbblock->real_offset = real_offset;
4707 NEW_BBLOCK (cfg, ebblock);
4708 ebblock->block_num = cfg->num_bblocks++;
4709 ebblock->real_offset = real_offset;
/* save the per-method compilation state before recursing */
4711 prev_args = cfg->args;
4712 prev_arg_types = cfg->arg_types;
4713 prev_inlined_method = cfg->inlined_method;
4714 cfg->inlined_method = cmethod;
4715 cfg->ret_var_set = FALSE;
4716 cfg->inline_depth ++;
4717 prev_real_offset = cfg->real_offset;
4718 prev_cbb_hash = cfg->cbb_hash;
4719 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4720 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4721 prev_cil_start = cfg->cil_start;
4722 prev_cbb = cfg->cbb;
4723 prev_current_method = cfg->current_method;
4724 prev_generic_context = cfg->generic_context;
4725 prev_ret_var_set = cfg->ret_var_set;
/* convert the callee's IL into IR between sbblock and ebblock */
4727 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4729 ret_var_set = cfg->ret_var_set;
/* restore the caller's compilation state */
4731 cfg->inlined_method = prev_inlined_method;
4732 cfg->real_offset = prev_real_offset;
4733 cfg->cbb_hash = prev_cbb_hash;
4734 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4735 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4736 cfg->cil_start = prev_cil_start;
4737 cfg->locals = prev_locals;
4738 cfg->args = prev_args;
4739 cfg->arg_types = prev_arg_types;
4740 cfg->current_method = prev_current_method;
4741 cfg->generic_context = prev_generic_context;
4742 cfg->ret_var_set = prev_ret_var_set;
4743 cfg->inline_depth --;
/* commit the inline when cheap enough (cost threshold 60) or when forced */
4745 if ((costs >= 0 && costs < 60) || inline_allways) {
4746 if (cfg->verbose_level > 2)
4747 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4749 mono_jit_stats.inlined_methods++;
4751 /* always add some code to avoid block split failures */
4752 MONO_INST_NEW (cfg, ins, OP_NOP);
4753 MONO_ADD_INS (prev_cbb, ins);
4755 prev_cbb->next_bb = sbblock;
4756 link_bblock (cfg, prev_cbb, sbblock);
4759 * Get rid of the begin and end bblocks if possible to aid local
4762 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4764 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4765 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4767 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4768 MonoBasicBlock *prev = ebblock->in_bb [0];
4769 mono_merge_basic_blocks (cfg, prev, ebblock);
4771 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4772 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4773 cfg->cbb = prev_cbb;
4781 * If the inlined method contains only a throw, then the ret var is not
4782 * set, so set it to a dummy value.
4785 static double r8_0 = 0.0;
4787 switch (rvar->type) {
4789 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4792 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4797 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4800 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4801 ins->type = STACK_R8;
4802 ins->inst_p0 = (void*)&r8_0;
4803 ins->dreg = rvar->dreg;
4804 MONO_ADD_INS (cfg->cbb, ins);
4807 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4810 g_assert_not_reached ();
/* push the inlined method's return value onto the caller's stack */
4814 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4817 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: the inline was too costly or conversion failed */
4820 if (cfg->verbose_level > 2)
4821 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4822 cfg->exception_type = MONO_EXCEPTION_NONE;
4823 mono_loader_clear_error ();
4825 /* This gets rid of the newly added bblocks */
4826 cfg->cbb = prev_cbb;
4828 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4833 * Some of these comments may well be out-of-date.
4834 * Design decisions: we do a single pass over the IL code (and we do bblock
4835 * splitting/merging in the few cases when it's required: a back jump to an IL
4836 * address that was not already seen as bblock starting point).
4837 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4838 * Complex operations are decomposed in simpler ones right away. We need to let the
4839 * arch-specific code peek and poke inside this process somehow (except when the
4840 * optimizations can take advantage of the full semantic info of coarse opcodes).
4841 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4842 * MonoInst->opcode initially is the IL opcode or some simplification of that
4843 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4844 * opcode with value bigger than OP_LAST.
4845 * At this point the IR can be handed over to an interpreter, a dumb code generator
4846 * or to the optimizing code generator that will translate it to SSA form.
4848 * Profiling directed optimizations.
4849 * We may compile by default with few or no optimizations and instrument the code
4850 * or the user may indicate what methods to optimize the most either in a config file
4851 * or through repeated runs where the compiler applies offline the optimizations to
4852 * each method and then decides if it was worth it.
4855 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4856 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4857 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4858 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4859 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4860 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4861 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4862 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4864 /* offset from br.s -> br like opcodes */
4865 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB, i.e. no
 * *other* bblock starts at that offset in cfg->cil_offset_to_bb.
 */
4868 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4870 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4872 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode and create a
 * basic block (via GET_BBLOCK) at every branch target and at the instruction
 * following each branch/switch, so the main conversion loop knows where
 * bblocks begin.  Additionally, blocks ending in CEE_THROW are flagged
 * out_of_line so they can be moved off the hot path.
 */
4876 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4878 unsigned char *ip = start;
4879 unsigned char *target;
4882 MonoBasicBlock *bblock;
4883 const MonoOpcode *opcode;
4886 cli_addr = ip - start;
4887 i = mono_opcode_value ((const guint8 **)&ip, end);
4890 opcode = &mono_opcodes [i];
/* advance over the operand according to its encoding kind */
4891 switch (opcode->argument) {
4892 case MonoInlineNone:
4895 case MonoInlineString:
4896 case MonoInlineType:
4897 case MonoInlineField:
4898 case MonoInlineMethod:
4901 case MonoShortInlineR:
4908 case MonoShortInlineVar:
4909 case MonoShortInlineI:
/* short (1-byte) branch: target is relative to the next instruction */
4912 case MonoShortInlineBrTarget:
4913 target = start + cli_addr + 2 + (signed char)ip [1];
4914 GET_BBLOCK (cfg, bblock, target);
4917 GET_BBLOCK (cfg, bblock, ip);
/* long (4-byte) branch */
4919 case MonoInlineBrTarget:
4920 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4921 GET_BBLOCK (cfg, bblock, target);
4924 GET_BBLOCK (cfg, bblock, ip);
4926 case MonoInlineSwitch: {
4927 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
4930 cli_addr += 5 + 4 * n;
4931 target = start + cli_addr;
4932 GET_BBLOCK (cfg, bblock, target);
4934 for (j = 0; j < n; ++j) {
4935 target = start + cli_addr + (gint32)read32 (ip);
4936 GET_BBLOCK (cfg, bblock, target);
4946 g_assert_not_reached ();
4949 if (i == CEE_THROW) {
4950 unsigned char *bb_start = ip - 1;
4952 /* Find the start of the bblock containing the throw */
4954 while ((bb_start >= start) && !bblock) {
4955 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* mark throw-only blocks so they can be laid out out-of-line */
4959 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data; otherwise it is resolved through
 * the metadata of M's image.  "Open" constructed methods are allowed here
 * (contrast with mini_get_method () below).
 */
static inline MonoMethod *
4969 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4973 if (m->wrapper_type != MONO_WRAPPER_NONE)
4974 return mono_method_get_wrapper_data (m, token);
4976 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when CFG is not compiled with
 * generic sharing, a method on an open constructed type is rejected
 * (the rejection branch falls outside this excerpt).
 */
static inline MonoMethod *
4982 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4984 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4986 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 * carry the class in their wrapper data, otherwise the token is looked up in
 * the method's image with the given generic CONTEXT.  The class is
 * initialized before being returned.
 */
static inline MonoClass*
4993 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4997 if (method->wrapper_type != MONO_WRAPPER_NONE)
4998 klass = mono_method_get_wrapper_data (method, token);
5000 klass = mono_class_get_full (method->klass->image, token, context);
5002 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS link demands).  For ECMA link
 * demands, code throwing a SecurityException is emitted before the call;
 * other failures are recorded on CFG as MONO_EXCEPTION_SECURITY_LINKDEMAND.
 */
5011 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* while inlining we can't evaluate the caller's stack-walk based demands */
5015 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5019 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5020 if (result == MONO_JIT_SECURITY_OK)
5023 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5024 /* Generate code to throw a SecurityException before the actual call/link */
5025 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5028 NEW_ICONST (cfg, args [0], 4);
5029 NEW_METHODCONST (cfg, args [1], caller);
5030 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5031 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5032 /* don't hide previous results */
5033 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5034 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException helper used to raise exceptions from
 * JIT-emitted security checks.
 */
5042 throw_exception (void)
5044 static MonoMethod *method = NULL;
5047 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5048 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, raising the pre-built exception object at runtime.
 */
5055 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5057 MonoMethod *thrower = throw_exception ();
5060 EMIT_NEW_PCONST (cfg, args [0], ex);
5061 mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
5069 get_original_method (MonoMethod *method)
/* non-wrappers are returned as-is (branch body outside this excerpt) */
5071 if (method->wrapper_type == MONO_WRAPPER_NONE)
5074 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5075 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5078 /* in other cases we need to find the original method */
5079 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: ask the security layer whether CALLER may access
 * FIELD (using the original method behind any wrapper) and, if not, emit
 * code that throws the returned exception at runtime.
 */
5083 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5084 MonoBasicBlock *bblock, unsigned char *ip)
5086 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5087 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5089 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: ask the security layer whether CALLER may call
 * CALLEE (using the original method behind any wrapper) and, if not, emit
 * code that throws the returned exception at runtime.
 */
5093 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5094 MonoBasicBlock *bblock, unsigned char *ip)
5096 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5097 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5099 emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
5107 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* expected IL pattern following the newarr: */
5110 * newarr[System.Int32]
5112 * ldtoken field valuetype ...
5113 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* dup; ldtoken <field>; call — ip[5]==0x4 checks the token's table byte */
5115 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5116 guint32 token = read32 (ip + 7);
5117 guint32 field_token = read32 (ip + 2);
5118 guint32 field_index = field_token & 0xffffff;
5120 const char *data_ptr;
5122 MonoMethod *cmethod;
5123 MonoClass *dummy_class;
5124 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5130 *out_field_token = field_token;
5132 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the real RuntimeHelpers.InitializeArray qualifies */
5135 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw bytes need no endian fix-up are optimized */
5137 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5138 case MONO_TYPE_BOOLEAN:
5142 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5143 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5144 case MONO_TYPE_CHAR:
5154 return NULL; /* stupid ARM FP swapped format */
/* the RVA blob must be at least as large as the array needs */
5164 if (size > mono_type_size (field->type, &dummy_align))
5167 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5168 if (!method->klass->image->dynamic) {
5169 field_index = read32 (ip + 2) & 0xffffff;
5170 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5171 data_ptr = mono_image_rva_map (method->klass->image, rva);
5172 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5173 /* for aot code we do the lookup on load */
5174 if (aot && data_ptr)
5175 return GUINT_TO_POINTER (rva);
5177 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images: read the data directly from the field */
5179 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the IL at IP, building a
 * message with the method's full name and a disassembly of the offending
 * instruction (or a note that the body is empty).  The header is queued on
 * cfg->headers_to_free rather than freed here.
 */
5187 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5189 char *method_fname = mono_method_full_name (method, TRUE);
5191 MonoMethodHeader *header = mono_method_get_header (method);
5193 if (header->code_size == 0)
5194 method_code = g_strdup ("method body is empty.");
5196 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5197 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5198 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5199 g_free (method_fname);
5200 g_free (method_code);
5201 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG.  The pointer is
 * registered as a GC root so the object stays alive until the compilation
 * failure is reported.
 */
5205 set_exception_object (MonoCompile *cfg, MonoException *exception)
5207 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5208 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5209 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type
 * variables through the shared-generics context when one is active.
 */
5213 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5217 if (cfg->generic_sharing_context)
5218 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5220 type = &klass->byval_arg;
5221 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N.  When the store would
 * be a plain register move and *SP is the constant just emitted at the end
 * of the current bblock, retarget that instruction's dreg to the local
 * instead of emitting a separate move.
 */
5225 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5228 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5229 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5230 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5231 /* Optimize reg-reg moves away */
5233 * Can't optimize other opcodes, since sp[0] might point to
5234 * the last ins of a decomposed opcode.
5236 sp [0]->dreg = (cfg)->locals [n]->dreg;
5238 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the local's
 * address, directly zero the local — a null pointer store for reference
 * types, VZERO for value types — so the address never escapes.
 * NOTE(review): the returned IP-advance and the SKIP handling fall outside
 * this excerpt.
 */
static inline unsigned char *
5247 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5256 local = read16 (ip + 2);
/* the initobj must be in the same bblock for the transformation to be safe */
5260 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5261 gboolean skip = FALSE;
5263 /* From the INITOBJ case */
5264 token = read32 (ip + 2);
5265 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5266 CHECK_TYPELOAD (klass);
5267 if (generic_class_is_reference_type (cfg, klass)) {
5268 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5269 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5270 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5271 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5272 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk up the inheritance chain and return whether CLASS derives from
 * (or is) System.Exception.
 */
5285 is_exception_class (MonoClass *class)
5288 if (class == mono_defaults.exception_class)
5290 class = class->parent;
5296 * mono_method_to_ir:
5298 * Translate the .net IL into linear IR.
5301 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5302 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5303 guint inline_offset, gboolean is_virtual_call)
5306 MonoInst *ins, **sp, **stack_start;
5307 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5308 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5309 MonoMethod *cmethod, *method_definition;
5310 MonoInst **arg_array;
5311 MonoMethodHeader *header;
5313 guint32 token, ins_flag;
5315 MonoClass *constrained_call = NULL;
5316 unsigned char *ip, *end, *target, *err_pos;
5317 static double r8_0 = 0.0;
5318 MonoMethodSignature *sig;
5319 MonoGenericContext *generic_context = NULL;
5320 MonoGenericContainer *generic_container = NULL;
5321 MonoType **param_types;
5322 int i, n, start_new_bblock, dreg;
5323 int num_calls = 0, inline_costs = 0;
5324 int breakpoint_id = 0;
5326 MonoBoolean security, pinvoke;
5327 MonoSecurityManager* secman = NULL;
5328 MonoDeclSecurityActions actions;
5329 GSList *class_inits = NULL;
5330 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5332 gboolean init_locals, seq_points, skip_dead_blocks;
5334 /* serialization and xdomain stuff may need access to private fields and methods */
5335 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5336 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5337 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5338 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5339 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5340 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5342 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5344 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5345 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5346 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5347 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5349 image = method->klass->image;
5350 header = mono_method_get_header (method);
5352 MonoLoaderError *error;
5354 if ((error = mono_loader_get_last_error ())) {
5355 cfg->exception_type = error->exception_type;
5357 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5358 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5360 goto exception_exit;
5362 generic_container = mono_method_get_generic_container (method);
5363 sig = mono_method_signature (method);
5364 num_args = sig->hasthis + sig->param_count;
5365 ip = (unsigned char*)header->code;
5366 cfg->cil_start = ip;
5367 end = ip + header->code_size;
5368 mono_jit_stats.cil_code_size += header->code_size;
5369 init_locals = header->init_locals;
5371 seq_points = cfg->gen_seq_points && cfg->method == method;
5374 * Methods without init_locals set could cause asserts in various passes
5379 method_definition = method;
5380 while (method_definition->is_inflated) {
5381 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5382 method_definition = imethod->declaring;
5385 /* SkipVerification is not allowed if core-clr is enabled */
5386 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5388 dont_verify_stloc = TRUE;
5391 if (!dont_verify && mini_method_verify (cfg, method_definition))
5392 goto exception_exit;
5394 if (mono_debug_using_mono_debugger ())
5395 cfg->keep_cil_nops = TRUE;
5397 if (sig->is_inflated)
5398 generic_context = mono_method_get_context (method);
5399 else if (generic_container)
5400 generic_context = &generic_container->context;
5401 cfg->generic_context = generic_context;
5403 if (!cfg->generic_sharing_context)
5404 g_assert (!sig->has_type_parameters);
5406 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5407 g_assert (method->is_inflated);
5408 g_assert (mono_method_get_context (method)->method_inst);
5410 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5411 g_assert (sig->generic_param_count);
5413 if (cfg->method == method) {
5414 cfg->real_offset = 0;
5416 cfg->real_offset = inline_offset;
5419 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5420 cfg->cil_offset_to_bb_len = header->code_size;
5422 cfg->current_method = method;
5424 if (cfg->verbose_level > 2)
5425 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5427 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5429 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5430 for (n = 0; n < sig->param_count; ++n)
5431 param_types [n + sig->hasthis] = sig->params [n];
5432 cfg->arg_types = param_types;
5434 dont_inline = g_list_prepend (dont_inline, method);
5435 if (cfg->method == method) {
5437 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5438 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5441 NEW_BBLOCK (cfg, start_bblock);
5442 cfg->bb_entry = start_bblock;
5443 start_bblock->cil_code = NULL;
5444 start_bblock->cil_length = 0;
5447 NEW_BBLOCK (cfg, end_bblock);
5448 cfg->bb_exit = end_bblock;
5449 end_bblock->cil_code = NULL;
5450 end_bblock->cil_length = 0;
5451 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5452 g_assert (cfg->num_bblocks == 2);
5454 arg_array = cfg->args;
5456 if (header->num_clauses) {
5457 cfg->spvars = g_hash_table_new (NULL, NULL);
5458 cfg->exvars = g_hash_table_new (NULL, NULL);
5460 /* handle exception clauses */
5461 for (i = 0; i < header->num_clauses; ++i) {
5462 MonoBasicBlock *try_bb;
5463 MonoExceptionClause *clause = &header->clauses [i];
5464 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5465 try_bb->real_offset = clause->try_offset;
5466 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5467 tblock->real_offset = clause->handler_offset;
5468 tblock->flags |= BB_EXCEPTION_HANDLER;
5470 link_bblock (cfg, try_bb, tblock);
5472 if (*(ip + clause->handler_offset) == CEE_POP)
5473 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5475 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5476 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5477 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5478 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5479 MONO_ADD_INS (tblock, ins);
5481 /* todo: is a fault block unsafe to optimize? */
5482 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5483 tblock->flags |= BB_EXCEPTION_UNSAFE;
5487 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5489 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5491 /* catch and filter blocks get the exception object on the stack */
5492 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5493 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5494 MonoInst *dummy_use;
5496 /* mostly like handle_stack_args (), but just sets the input args */
5497 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5498 tblock->in_scount = 1;
5499 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5500 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5503 * Add a dummy use for the exvar so its liveness info will be
5507 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5509 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5510 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5511 tblock->flags |= BB_EXCEPTION_HANDLER;
5512 tblock->real_offset = clause->data.filter_offset;
5513 tblock->in_scount = 1;
5514 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5515 /* The filter block shares the exvar with the handler block */
5516 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5517 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5518 MONO_ADD_INS (tblock, ins);
5522 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5523 clause->data.catch_class &&
5524 cfg->generic_sharing_context &&
5525 mono_class_check_context_used (clause->data.catch_class)) {
5527 * In shared generic code with catch
5528 * clauses containing type variables
5529 * the exception handling code has to
5530 * be able to get to the rgctx.
5531 * Therefore we have to make sure that
5532 * the vtable/mrgctx argument (for
5533 * static or generic methods) or the
5534 * "this" argument (for non-static
5535 * methods) are live.
5537 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5538 mini_method_get_context (method)->method_inst ||
5539 method->klass->valuetype) {
5540 mono_get_vtable_var (cfg);
5542 MonoInst *dummy_use;
5544 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5549 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5550 cfg->cbb = start_bblock;
5551 cfg->args = arg_array;
5552 mono_save_args (cfg, sig, inline_args);
5555 /* FIRST CODE BLOCK */
5556 NEW_BBLOCK (cfg, bblock);
5557 bblock->cil_code = ip;
5561 ADD_BBLOCK (cfg, bblock);
5563 if (cfg->method == method) {
5564 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5565 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5566 MONO_INST_NEW (cfg, ins, OP_BREAK);
5567 MONO_ADD_INS (bblock, ins);
5571 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5572 secman = mono_security_manager_get_methods ();
5574 security = (secman && mono_method_has_declsec (method));
5575 /* at this point having security doesn't mean we have any code to generate */
5576 if (security && (cfg->method == method)) {
5577 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5578 * And we do not want to enter the next section (with allocation) if we
5579 * have nothing to generate */
5580 security = mono_declsec_get_demands (method, &actions);
5583 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5584 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5586 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5587 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5588 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5590 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5591 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5595 mono_custom_attrs_free (custom);
5598 custom = mono_custom_attrs_from_class (wrapped->klass);
5599 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5603 mono_custom_attrs_free (custom);
5606 /* not a P/Invoke after all */
5611 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5612 /* we use a separate basic block for the initialization code */
5613 NEW_BBLOCK (cfg, init_localsbb);
5614 cfg->bb_init = init_localsbb;
5615 init_localsbb->real_offset = cfg->real_offset;
5616 start_bblock->next_bb = init_localsbb;
5617 init_localsbb->next_bb = bblock;
5618 link_bblock (cfg, start_bblock, init_localsbb);
5619 link_bblock (cfg, init_localsbb, bblock);
5621 cfg->cbb = init_localsbb;
5623 start_bblock->next_bb = bblock;
5624 link_bblock (cfg, start_bblock, bblock);
5627 /* at this point we know, if security is TRUE, that some code needs to be generated */
5628 if (security && (cfg->method == method)) {
5631 mono_jit_stats.cas_demand_generation++;
5633 if (actions.demand.blob) {
5634 /* Add code for SecurityAction.Demand */
5635 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5636 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5637 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5638 mono_emit_method_call (cfg, secman->demand, args, NULL);
5640 if (actions.noncasdemand.blob) {
5641 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5642 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5643 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5644 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5645 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5646 mono_emit_method_call (cfg, secman->demand, args, NULL);
5648 if (actions.demandchoice.blob) {
5649 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5650 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5651 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5652 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5653 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5657 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5659 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5662 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5663 /* check if this is native code, e.g. an icall or a p/invoke */
5664 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5665 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5667 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5668 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5670 /* if this ia a native call then it can only be JITted from platform code */
5671 if ((icall || pinvk) && method->klass && method->klass->image) {
5672 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5673 MonoException *ex = icall ? mono_get_exception_security () :
5674 mono_get_exception_method_access ();
5675 emit_throw_exception (cfg, ex);
5682 if (header->code_size == 0)
5685 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5690 if (cfg->method == method)
5691 mono_debug_init_method (cfg, bblock, breakpoint_id);
5693 for (n = 0; n < header->num_locals; ++n) {
5694 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5699 /* We force the vtable variable here for all shared methods
5700 for the possibility that they might show up in a stack
5701 trace where their exact instantiation is needed. */
5702 if (cfg->generic_sharing_context && method == cfg->method) {
5703 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5704 mini_method_get_context (method)->method_inst ||
5705 method->klass->valuetype) {
5706 mono_get_vtable_var (cfg);
5708 /* FIXME: Is there a better way to do this?
5709 We need the variable live for the duration
5710 of the whole method. */
5711 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5715 /* add a check for this != NULL to inlined methods */
5716 if (is_virtual_call) {
5719 NEW_ARGLOAD (cfg, arg_ins, 0);
5720 MONO_ADD_INS (cfg->cbb, arg_ins);
5721 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5724 skip_dead_blocks = !dont_verify;
5725 if (skip_dead_blocks) {
5726 original_bb = bb = mono_basic_block_split (method, &error);
5727 if (!mono_error_ok (&error)) {
5728 mono_error_cleanup (&error);
5734 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5735 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5738 start_new_bblock = 0;
5741 if (cfg->method == method)
5742 cfg->real_offset = ip - header->code;
5744 cfg->real_offset = inline_offset;
5749 if (start_new_bblock) {
5750 bblock->cil_length = ip - bblock->cil_code;
5751 if (start_new_bblock == 2) {
5752 g_assert (ip == tblock->cil_code);
5754 GET_BBLOCK (cfg, tblock, ip);
5756 bblock->next_bb = tblock;
5759 start_new_bblock = 0;
5760 for (i = 0; i < bblock->in_scount; ++i) {
5761 if (cfg->verbose_level > 3)
5762 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5763 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5767 g_slist_free (class_inits);
5770 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5771 link_bblock (cfg, bblock, tblock);
5772 if (sp != stack_start) {
5773 handle_stack_args (cfg, stack_start, sp - stack_start);
5775 CHECK_UNVERIFIABLE (cfg);
5777 bblock->next_bb = tblock;
5780 for (i = 0; i < bblock->in_scount; ++i) {
5781 if (cfg->verbose_level > 3)
5782 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5783 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5786 g_slist_free (class_inits);
5791 if (skip_dead_blocks) {
5792 int ip_offset = ip - header->code;
5794 if (ip_offset == bb->end)
5798 int op_size = mono_opcode_size (ip, end);
5799 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5801 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5803 if (ip_offset + op_size == bb->end) {
5804 MONO_INST_NEW (cfg, ins, OP_NOP);
5805 MONO_ADD_INS (bblock, ins);
5806 start_new_bblock = 1;
5814 * Sequence points are points where the debugger can place a breakpoint.
5815 * Currently, we generate these automatically at points where the IL
5818 if (seq_points && sp == stack_start) {
5819 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5820 MONO_ADD_INS (cfg->cbb, ins);
5823 bblock->real_offset = cfg->real_offset;
5825 if ((cfg->method == method) && cfg->coverage_info) {
5826 guint32 cil_offset = ip - header->code;
5827 cfg->coverage_info->data [cil_offset].cil_code = ip;
5829 /* TODO: Use an increment here */
5830 #if defined(TARGET_X86)
5831 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5832 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5834 MONO_ADD_INS (cfg->cbb, ins);
5836 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5841 if (cfg->verbose_level > 3)
5842 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5846 if (cfg->keep_cil_nops)
5847 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5849 MONO_INST_NEW (cfg, ins, OP_NOP);
5851 MONO_ADD_INS (bblock, ins);
5854 if (should_insert_brekpoint (cfg->method))
5855 MONO_INST_NEW (cfg, ins, OP_BREAK);
5857 MONO_INST_NEW (cfg, ins, OP_NOP);
5859 MONO_ADD_INS (bblock, ins);
5865 CHECK_STACK_OVF (1);
5866 n = (*ip)-CEE_LDARG_0;
5868 EMIT_NEW_ARGLOAD (cfg, ins, n);
5876 CHECK_STACK_OVF (1);
5877 n = (*ip)-CEE_LDLOC_0;
5879 EMIT_NEW_LOCLOAD (cfg, ins, n);
5888 n = (*ip)-CEE_STLOC_0;
5891 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5893 emit_stloc_ir (cfg, sp, header, n);
5900 CHECK_STACK_OVF (1);
5903 EMIT_NEW_ARGLOAD (cfg, ins, n);
5909 CHECK_STACK_OVF (1);
5912 NEW_ARGLOADA (cfg, ins, n);
5913 MONO_ADD_INS (cfg->cbb, ins);
5923 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5925 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5930 CHECK_STACK_OVF (1);
5933 EMIT_NEW_LOCLOAD (cfg, ins, n);
5937 case CEE_LDLOCA_S: {
5938 unsigned char *tmp_ip;
5940 CHECK_STACK_OVF (1);
5941 CHECK_LOCAL (ip [1]);
5943 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5949 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5958 CHECK_LOCAL (ip [1]);
5959 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5961 emit_stloc_ir (cfg, sp, header, ip [1]);
5966 CHECK_STACK_OVF (1);
5967 EMIT_NEW_PCONST (cfg, ins, NULL);
5968 ins->type = STACK_OBJ;
5973 CHECK_STACK_OVF (1);
5974 EMIT_NEW_ICONST (cfg, ins, -1);
5987 CHECK_STACK_OVF (1);
5988 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5994 CHECK_STACK_OVF (1);
5996 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6002 CHECK_STACK_OVF (1);
6003 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6009 CHECK_STACK_OVF (1);
6010 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6011 ins->type = STACK_I8;
6012 ins->dreg = alloc_dreg (cfg, STACK_I8);
6014 ins->inst_l = (gint64)read64 (ip);
6015 MONO_ADD_INS (bblock, ins);
6021 gboolean use_aotconst = FALSE;
6023 #ifdef TARGET_POWERPC
6024 /* FIXME: Clean this up */
6025 if (cfg->compile_aot)
6026 use_aotconst = TRUE;
6029 /* FIXME: we should really allocate this only late in the compilation process */
6030 f = mono_domain_alloc (cfg->domain, sizeof (float));
6032 CHECK_STACK_OVF (1);
6038 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6040 dreg = alloc_freg (cfg);
6041 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6042 ins->type = STACK_R8;
6044 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6045 ins->type = STACK_R8;
6046 ins->dreg = alloc_dreg (cfg, STACK_R8);
6048 MONO_ADD_INS (bblock, ins);
6058 gboolean use_aotconst = FALSE;
6060 #ifdef TARGET_POWERPC
6061 /* FIXME: Clean this up */
6062 if (cfg->compile_aot)
6063 use_aotconst = TRUE;
6066 /* FIXME: we should really allocate this only late in the compilation process */
6067 d = mono_domain_alloc (cfg->domain, sizeof (double));
6069 CHECK_STACK_OVF (1);
6075 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6077 dreg = alloc_freg (cfg);
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6079 ins->type = STACK_R8;
6081 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6082 ins->type = STACK_R8;
6083 ins->dreg = alloc_dreg (cfg, STACK_R8);
6085 MONO_ADD_INS (bblock, ins);
6094 MonoInst *temp, *store;
6096 CHECK_STACK_OVF (1);
6100 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6101 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6103 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6106 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6119 if (sp [0]->type == STACK_R8)
6120 /* we need to pop the value from the x86 FP stack */
6121 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6130 if (stack_start != sp)
6132 token = read32 (ip + 1);
6133 /* FIXME: check the signature matches */
6134 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6136 if (!cmethod || mono_loader_get_last_error ())
6139 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6140 GENERIC_SHARING_FAILURE (CEE_JMP);
6142 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6143 CHECK_CFG_EXCEPTION;
6145 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6147 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6150 /* Handle tail calls similarly to calls */
6151 n = fsig->param_count + fsig->hasthis;
6153 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6154 call->method = cmethod;
6155 call->tail_call = TRUE;
6156 call->signature = mono_method_signature (cmethod);
6157 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6158 call->inst.inst_p0 = cmethod;
6159 for (i = 0; i < n; ++i)
6160 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6162 mono_arch_emit_call (cfg, call);
6163 MONO_ADD_INS (bblock, (MonoInst*)call);
6166 for (i = 0; i < num_args; ++i)
6167 /* Prevent arguments from being optimized away */
6168 arg_array [i]->flags |= MONO_INST_VOLATILE;
6170 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6171 ins = (MonoInst*)call;
6172 ins->inst_p0 = cmethod;
6173 MONO_ADD_INS (bblock, ins);
6177 start_new_bblock = 1;
6182 case CEE_CALLVIRT: {
6183 MonoInst *addr = NULL;
6184 MonoMethodSignature *fsig = NULL;
6186 int virtual = *ip == CEE_CALLVIRT;
6187 int calli = *ip == CEE_CALLI;
6188 gboolean pass_imt_from_rgctx = FALSE;
6189 MonoInst *imt_arg = NULL;
6190 gboolean pass_vtable = FALSE;
6191 gboolean pass_mrgctx = FALSE;
6192 MonoInst *vtable_arg = NULL;
6193 gboolean check_this = FALSE;
6194 gboolean supported_tail_call = FALSE;
6197 token = read32 (ip + 1);
6204 if (method->wrapper_type != MONO_WRAPPER_NONE)
6205 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6207 fsig = mono_metadata_parse_signature (image, token);
6209 n = fsig->param_count + fsig->hasthis;
6211 if (method->dynamic && fsig->pinvoke) {
6215 * This is a call through a function pointer using a pinvoke
6216 * signature. Have to create a wrapper and call that instead.
6217 * FIXME: This is very slow, need to create a wrapper at JIT time
6218 * instead based on the signature.
6220 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6221 EMIT_NEW_PCONST (cfg, args [1], fsig);
6223 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6226 MonoMethod *cil_method;
6228 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6229 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6230 cil_method = cmethod;
6231 } else if (constrained_call) {
6232 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6234 * This is needed since get_method_constrained can't find
6235 * the method in klass representing a type var.
6236 * The type var is guaranteed to be a reference type in this
6239 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6240 cil_method = cmethod;
6241 g_assert (!cmethod->klass->valuetype);
6243 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6246 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6247 cil_method = cmethod;
6250 if (!cmethod || mono_loader_get_last_error ())
6252 if (!dont_verify && !cfg->skip_visibility) {
6253 MonoMethod *target_method = cil_method;
6254 if (method->is_inflated) {
6255 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6257 if (!mono_method_can_access_method (method_definition, target_method) &&
6258 !mono_method_can_access_method (method, cil_method))
6259 METHOD_ACCESS_FAILURE;
6262 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6263 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6265 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6266 /* MS.NET seems to silently convert this to a callvirt */
6271 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6272 * converts to a callvirt.
6274 * tests/bug-515884.il is an example of this behavior
6276 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6277 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6278 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6282 if (!cmethod->klass->inited)
6283 if (!mono_class_init (cmethod->klass))
6286 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6287 mini_class_is_system_array (cmethod->klass)) {
6288 array_rank = cmethod->klass->rank;
6289 fsig = mono_method_signature (cmethod);
6291 fsig = mono_method_signature (cmethod);
6296 if (fsig->pinvoke) {
6297 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6298 check_for_pending_exc, FALSE);
6299 fsig = mono_method_signature (wrapper);
6300 } else if (constrained_call) {
6301 fsig = mono_method_signature (cmethod);
6303 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6307 mono_save_token_info (cfg, image, token, cil_method);
6309 n = fsig->param_count + fsig->hasthis;
6311 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6312 if (check_linkdemand (cfg, method, cmethod))
6314 CHECK_CFG_EXCEPTION;
6317 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6318 g_assert_not_reached ();
6321 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6324 if (!cfg->generic_sharing_context && cmethod)
6325 g_assert (!mono_method_check_context_used (cmethod));
6329 //g_assert (!virtual || fsig->hasthis);
6333 if (constrained_call) {
6335 * We have the `constrained.' prefix opcode.
6337 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6339 * The type parameter is instantiated as a valuetype,
6340 * but that type doesn't override the method we're
6341 * calling, so we need to box `this'.
6343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6344 ins->klass = constrained_call;
6345 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6346 CHECK_CFG_EXCEPTION;
6347 } else if (!constrained_call->valuetype) {
6348 int dreg = alloc_preg (cfg);
6351 * The type parameter is instantiated as a reference
6352 * type. We have a managed pointer on the stack, so
6353 * we need to dereference it here.
6355 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6356 ins->type = STACK_OBJ;
6358 } else if (cmethod->klass->valuetype)
6360 constrained_call = NULL;
6363 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6367 * If the callee is a shared method, then its static cctor
6368 * might not get called after the call was patched.
6370 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6371 emit_generic_class_init (cfg, cmethod->klass);
6372 CHECK_TYPELOAD (cmethod->klass);
6375 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6376 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6377 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6378 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6379 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6382 * Pass vtable iff target method might
6383 * be shared, which means that sharing
6384 * is enabled for its class and its
6385 * context is sharable (and it's not a
6388 if (sharing_enabled && context_sharable &&
6389 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6393 if (cmethod && mini_method_get_context (cmethod) &&
6394 mini_method_get_context (cmethod)->method_inst) {
6395 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6396 MonoGenericContext *context = mini_method_get_context (cmethod);
6397 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6399 g_assert (!pass_vtable);
6401 if (sharing_enabled && context_sharable)
6405 if (cfg->generic_sharing_context && cmethod) {
6406 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6408 context_used = mono_method_check_context_used (cmethod);
6410 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6411 /* Generic method interface
6412 calls are resolved via a
6413 helper function and don't
6415 if (!cmethod_context || !cmethod_context->method_inst)
6416 pass_imt_from_rgctx = TRUE;
6420 * If a shared method calls another
6421 * shared method then the caller must
6422 * have a generic sharing context
6423 * because the magic trampoline
6424 * requires it. FIXME: We shouldn't
6425 * have to force the vtable/mrgctx
6426 * variable here. Instead there
6427 * should be a flag in the cfg to
6428 * request a generic sharing context.
6431 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6432 mono_get_vtable_var (cfg);
6437 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6439 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6441 CHECK_TYPELOAD (cmethod->klass);
6442 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6447 g_assert (!vtable_arg);
6449 if (!cfg->compile_aot) {
6451 * emit_get_rgctx_method () calls mono_class_vtable () so check
6452 * for type load errors before.
6454 mono_class_setup_vtable (cmethod->klass);
6455 CHECK_TYPELOAD (cmethod->klass);
6458 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6460 /* !marshalbyref is needed to properly handle generic methods + remoting */
6461 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6462 MONO_METHOD_IS_FINAL (cmethod)) &&
6463 !cmethod->klass->marshalbyref) {
6470 if (pass_imt_from_rgctx) {
6471 g_assert (!pass_vtable);
6474 imt_arg = emit_get_rgctx_method (cfg, context_used,
6475 cmethod, MONO_RGCTX_INFO_METHOD);
6479 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6481 /* Calling virtual generic methods */
6482 if (cmethod && virtual &&
6483 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6484 !(MONO_METHOD_IS_FINAL (cmethod) &&
6485 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6486 mono_method_signature (cmethod)->generic_param_count) {
6487 MonoInst *this_temp, *this_arg_temp, *store;
6488 MonoInst *iargs [4];
6490 g_assert (mono_method_signature (cmethod)->is_inflated);
6492 /* Prevent inlining of methods that contain indirect calls */
6495 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6496 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6497 g_assert (!imt_arg);
6499 g_assert (cmethod->is_inflated);
6500 imt_arg = emit_get_rgctx_method (cfg, context_used,
6501 cmethod, MONO_RGCTX_INFO_METHOD);
6502 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6506 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6507 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6508 MONO_ADD_INS (bblock, store);
6510 /* FIXME: This should be a managed pointer */
6511 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6513 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6514 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6515 cmethod, MONO_RGCTX_INFO_METHOD);
6516 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6517 addr = mono_emit_jit_icall (cfg,
6518 mono_helper_compile_generic_method, iargs);
6520 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6522 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6525 if (!MONO_TYPE_IS_VOID (fsig->ret))
6526 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6528 CHECK_CFG_EXCEPTION;
6535 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6536 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6538 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6542 /* FIXME: runtime generic context pointer for jumps? */
6543 /* FIXME: handle this for generic sharing eventually */
6544 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6547 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6550 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6551 /* Handle tail calls similarly to calls */
6552 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6554 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6555 call->tail_call = TRUE;
6556 call->method = cmethod;
6557 call->signature = mono_method_signature (cmethod);
6560 * We implement tail calls by storing the actual arguments into the
6561 * argument variables, then emitting a CEE_JMP.
6563 for (i = 0; i < n; ++i) {
6564 /* Prevent argument from being register allocated */
6565 arg_array [i]->flags |= MONO_INST_VOLATILE;
6566 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6570 ins = (MonoInst*)call;
6571 ins->inst_p0 = cmethod;
6572 ins->inst_p1 = arg_array [0];
6573 MONO_ADD_INS (bblock, ins);
6574 link_bblock (cfg, bblock, end_bblock);
6575 start_new_bblock = 1;
6577 CHECK_CFG_EXCEPTION;
6579 /* skip CEE_RET as well */
6585 /* Conversion to a JIT intrinsic */
6586 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6588 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6589 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6594 CHECK_CFG_EXCEPTION;
6602 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6603 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6604 mono_method_check_inlining (cfg, cmethod) &&
6605 !g_list_find (dont_inline, cmethod)) {
6607 gboolean allways = FALSE;
6609 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6610 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6611 /* Prevent inlining of methods that call wrappers */
6613 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6617 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6619 cfg->real_offset += 5;
6622 if (!MONO_TYPE_IS_VOID (fsig->ret))
6623 /* *sp is already set by inline_method */
6626 inline_costs += costs;
6632 inline_costs += 10 * num_calls++;
6634 /* Tail recursion elimination */
6635 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6636 gboolean has_vtargs = FALSE;
6639 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6642 /* keep it simple */
6643 for (i = fsig->param_count - 1; i >= 0; i--) {
6644 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6649 for (i = 0; i < n; ++i)
6650 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6651 MONO_INST_NEW (cfg, ins, OP_BR);
6652 MONO_ADD_INS (bblock, ins);
6653 tblock = start_bblock->out_bb [0];
6654 link_bblock (cfg, bblock, tblock);
6655 ins->inst_target_bb = tblock;
6656 start_new_bblock = 1;
6658 /* skip the CEE_RET, too */
6659 if (ip_in_bb (cfg, bblock, ip + 5))
6669 /* Generic sharing */
6670 /* FIXME: only do this for generic methods if
6671 they are not shared! */
6672 if (context_used && !imt_arg && !array_rank &&
6673 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6674 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6675 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6676 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6679 g_assert (cfg->generic_sharing_context && cmethod);
6683 * We are compiling a call to a
6684 * generic method from shared code,
6685 * which means that we have to look up
6686 * the method in the rgctx and do an
6689 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6692 /* Indirect calls */
6694 g_assert (!imt_arg);
6696 if (*ip == CEE_CALL)
6697 g_assert (context_used);
6698 else if (*ip == CEE_CALLI)
6699 g_assert (!vtable_arg);
6701 /* FIXME: what the hell is this??? */
6702 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6703 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6705 /* Prevent inlining of methods with indirect calls */
6710 int rgctx_reg = mono_alloc_preg (cfg);
6712 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6713 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6714 call = (MonoCallInst*)ins;
6715 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6717 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6719 * Instead of emitting an indirect call, emit a direct call
6720 * with the contents of the aotconst as the patch info.
6722 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6724 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6725 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6728 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6731 if (!MONO_TYPE_IS_VOID (fsig->ret))
6732 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6734 CHECK_CFG_EXCEPTION;
6745 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6746 if (sp [fsig->param_count]->type == STACK_OBJ) {
6747 MonoInst *iargs [2];
6750 iargs [1] = sp [fsig->param_count];
6752 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6755 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6756 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6757 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6758 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6760 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6763 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6764 if (!cmethod->klass->element_class->valuetype && !readonly)
6765 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6766 CHECK_TYPELOAD (cmethod->klass);
6769 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6772 g_assert_not_reached ();
6775 CHECK_CFG_EXCEPTION;
6782 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6784 if (!MONO_TYPE_IS_VOID (fsig->ret))
6785 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6787 CHECK_CFG_EXCEPTION;
6797 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6799 } else if (imt_arg) {
6800 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6802 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6805 if (!MONO_TYPE_IS_VOID (fsig->ret))
6806 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6808 CHECK_CFG_EXCEPTION;
6815 if (cfg->method != method) {
6816 /* return from inlined method */
6818 * If in_count == 0, that means the ret is unreachable due to
6819 * being preceeded by a throw. In that case, inline_method () will
6820 * handle setting the return value
6821 * (test case: test_0_inline_throw ()).
6823 if (return_var && cfg->cbb->in_count) {
6827 //g_assert (returnvar != -1);
6828 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6829 cfg->ret_var_set = TRUE;
6833 MonoType *ret_type = mono_method_signature (method)->ret;
6837 * Place a seq point here too even through the IL stack is not
6838 * empty, so a step over on
6841 * will work correctly.
6843 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6844 MONO_ADD_INS (cfg->cbb, ins);
6847 g_assert (!return_var);
6850 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6853 if (!cfg->vret_addr) {
6856 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6858 EMIT_NEW_RETLOADA (cfg, ret_addr);
6860 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6861 ins->klass = mono_class_from_mono_type (ret_type);
6864 #ifdef MONO_ARCH_SOFT_FLOAT
6865 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6866 MonoInst *iargs [1];
6870 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6871 mono_arch_emit_setret (cfg, method, conv);
6873 mono_arch_emit_setret (cfg, method, *sp);
6876 mono_arch_emit_setret (cfg, method, *sp);
6881 if (sp != stack_start)
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6885 ins->inst_target_bb = end_bblock;
6886 MONO_ADD_INS (bblock, ins);
6887 link_bblock (cfg, bblock, end_bblock);
6888 start_new_bblock = 1;
6892 MONO_INST_NEW (cfg, ins, OP_BR);
6894 target = ip + 1 + (signed char)(*ip);
6896 GET_BBLOCK (cfg, tblock, target);
6897 link_bblock (cfg, bblock, tblock);
6898 ins->inst_target_bb = tblock;
6899 if (sp != stack_start) {
6900 handle_stack_args (cfg, stack_start, sp - stack_start);
6902 CHECK_UNVERIFIABLE (cfg);
6904 MONO_ADD_INS (bblock, ins);
6905 start_new_bblock = 1;
6906 inline_costs += BRANCH_COST;
6920 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6922 target = ip + 1 + *(signed char*)ip;
6928 inline_costs += BRANCH_COST;
6932 MONO_INST_NEW (cfg, ins, OP_BR);
6935 target = ip + 4 + (gint32)read32(ip);
6937 GET_BBLOCK (cfg, tblock, target);
6938 link_bblock (cfg, bblock, tblock);
6939 ins->inst_target_bb = tblock;
6940 if (sp != stack_start) {
6941 handle_stack_args (cfg, stack_start, sp - stack_start);
6943 CHECK_UNVERIFIABLE (cfg);
6946 MONO_ADD_INS (bblock, ins);
6948 start_new_bblock = 1;
6949 inline_costs += BRANCH_COST;
6956 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6957 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6958 guint32 opsize = is_short ? 1 : 4;
6960 CHECK_OPSIZE (opsize);
6962 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6965 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6970 GET_BBLOCK (cfg, tblock, target);
6971 link_bblock (cfg, bblock, tblock);
6972 GET_BBLOCK (cfg, tblock, ip);
6973 link_bblock (cfg, bblock, tblock);
6975 if (sp != stack_start) {
6976 handle_stack_args (cfg, stack_start, sp - stack_start);
6977 CHECK_UNVERIFIABLE (cfg);
6980 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6981 cmp->sreg1 = sp [0]->dreg;
6982 type_from_op (cmp, sp [0], NULL);
6985 #if SIZEOF_REGISTER == 4
6986 if (cmp->opcode == OP_LCOMPARE_IMM) {
6987 /* Convert it to OP_LCOMPARE */
6988 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6989 ins->type = STACK_I8;
6990 ins->dreg = alloc_dreg (cfg, STACK_I8);
6992 MONO_ADD_INS (bblock, ins);
6993 cmp->opcode = OP_LCOMPARE;
6994 cmp->sreg2 = ins->dreg;
6997 MONO_ADD_INS (bblock, cmp);
6999 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7000 type_from_op (ins, sp [0], NULL);
7001 MONO_ADD_INS (bblock, ins);
7002 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7003 GET_BBLOCK (cfg, tblock, target);
7004 ins->inst_true_bb = tblock;
7005 GET_BBLOCK (cfg, tblock, ip);
7006 ins->inst_false_bb = tblock;
7007 start_new_bblock = 2;
7010 inline_costs += BRANCH_COST;
7025 MONO_INST_NEW (cfg, ins, *ip);
7027 target = ip + 4 + (gint32)read32(ip);
7033 inline_costs += BRANCH_COST;
7037 MonoBasicBlock **targets;
7038 MonoBasicBlock *default_bblock;
7039 MonoJumpInfoBBTable *table;
7040 int offset_reg = alloc_preg (cfg);
7041 int target_reg = alloc_preg (cfg);
7042 int table_reg = alloc_preg (cfg);
7043 int sum_reg = alloc_preg (cfg);
7044 gboolean use_op_switch;
7048 n = read32 (ip + 1);
7051 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7055 CHECK_OPSIZE (n * sizeof (guint32));
7056 target = ip + n * sizeof (guint32);
7058 GET_BBLOCK (cfg, default_bblock, target);
7059 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7061 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7062 for (i = 0; i < n; ++i) {
7063 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7064 targets [i] = tblock;
7065 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7069 if (sp != stack_start) {
7071 * Link the current bb with the targets as well, so handle_stack_args
7072 * will set their in_stack correctly.
7074 link_bblock (cfg, bblock, default_bblock);
7075 for (i = 0; i < n; ++i)
7076 link_bblock (cfg, bblock, targets [i]);
7078 handle_stack_args (cfg, stack_start, sp - stack_start);
7080 CHECK_UNVERIFIABLE (cfg);
7083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7087 for (i = 0; i < n; ++i)
7088 link_bblock (cfg, bblock, targets [i]);
7090 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7091 table->table = targets;
7092 table->table_size = n;
7094 use_op_switch = FALSE;
7096 /* ARM implements SWITCH statements differently */
7097 /* FIXME: Make it use the generic implementation */
7098 if (!cfg->compile_aot)
7099 use_op_switch = TRUE;
7102 if (COMPILE_LLVM (cfg))
7103 use_op_switch = TRUE;
7105 cfg->cbb->has_jump_table = 1;
7107 if (use_op_switch) {
7108 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7109 ins->sreg1 = src1->dreg;
7110 ins->inst_p0 = table;
7111 ins->inst_many_bb = targets;
7112 ins->klass = GUINT_TO_POINTER (n);
7113 MONO_ADD_INS (cfg->cbb, ins);
7115 if (sizeof (gpointer) == 8)
7116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7120 #if SIZEOF_REGISTER == 8
7121 /* The upper word might not be zero, and we add it to a 64 bit address later */
7122 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7125 if (cfg->compile_aot) {
7126 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7128 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7129 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7130 ins->inst_p0 = table;
7131 ins->dreg = table_reg;
7132 MONO_ADD_INS (cfg->cbb, ins);
7135 /* FIXME: Use load_memindex */
7136 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7138 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7140 start_new_bblock = 1;
7141 inline_costs += (BRANCH_COST * 2);
7161 dreg = alloc_freg (cfg);
7164 dreg = alloc_lreg (cfg);
7167 dreg = alloc_preg (cfg);
7170 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7171 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7172 ins->flags |= ins_flag;
7174 MONO_ADD_INS (bblock, ins);
7189 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7190 ins->flags |= ins_flag;
7192 MONO_ADD_INS (bblock, ins);
7194 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7195 emit_write_barrier (cfg, sp [0], sp [1], -1);
7204 MONO_INST_NEW (cfg, ins, (*ip));
7206 ins->sreg1 = sp [0]->dreg;
7207 ins->sreg2 = sp [1]->dreg;
7208 type_from_op (ins, sp [0], sp [1]);
7210 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7212 /* Use the immediate opcodes if possible */
7213 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7214 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7215 if (imm_opcode != -1) {
7216 ins->opcode = imm_opcode;
7217 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7220 sp [1]->opcode = OP_NOP;
7224 MONO_ADD_INS ((cfg)->cbb, (ins));
7226 *sp++ = mono_decompose_opcode (cfg, ins);
7243 MONO_INST_NEW (cfg, ins, (*ip));
7245 ins->sreg1 = sp [0]->dreg;
7246 ins->sreg2 = sp [1]->dreg;
7247 type_from_op (ins, sp [0], sp [1]);
7249 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7250 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7252 /* FIXME: Pass opcode to is_inst_imm */
7254 /* Use the immediate opcodes if possible */
7255 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7258 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7259 if (imm_opcode != -1) {
7260 ins->opcode = imm_opcode;
7261 if (sp [1]->opcode == OP_I8CONST) {
7262 #if SIZEOF_REGISTER == 8
7263 ins->inst_imm = sp [1]->inst_l;
7265 ins->inst_ls_word = sp [1]->inst_ls_word;
7266 ins->inst_ms_word = sp [1]->inst_ms_word;
7270 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7273 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7274 if (sp [1]->next == NULL)
7275 sp [1]->opcode = OP_NOP;
7278 MONO_ADD_INS ((cfg)->cbb, (ins));
7280 *sp++ = mono_decompose_opcode (cfg, ins);
7293 case CEE_CONV_OVF_I8:
7294 case CEE_CONV_OVF_U8:
7298 /* Special case this earlier so we have long constants in the IR */
7299 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7300 int data = sp [-1]->inst_c0;
7301 sp [-1]->opcode = OP_I8CONST;
7302 sp [-1]->type = STACK_I8;
7303 #if SIZEOF_REGISTER == 8
7304 if ((*ip) == CEE_CONV_U8)
7305 sp [-1]->inst_c0 = (guint32)data;
7307 sp [-1]->inst_c0 = data;
7309 sp [-1]->inst_ls_word = data;
7310 if ((*ip) == CEE_CONV_U8)
7311 sp [-1]->inst_ms_word = 0;
7313 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7315 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7322 case CEE_CONV_OVF_I4:
7323 case CEE_CONV_OVF_I1:
7324 case CEE_CONV_OVF_I2:
7325 case CEE_CONV_OVF_I:
7326 case CEE_CONV_OVF_U:
7329 if (sp [-1]->type == STACK_R8) {
7330 ADD_UNOP (CEE_CONV_OVF_I8);
7337 case CEE_CONV_OVF_U1:
7338 case CEE_CONV_OVF_U2:
7339 case CEE_CONV_OVF_U4:
7342 if (sp [-1]->type == STACK_R8) {
7343 ADD_UNOP (CEE_CONV_OVF_U8);
7350 case CEE_CONV_OVF_I1_UN:
7351 case CEE_CONV_OVF_I2_UN:
7352 case CEE_CONV_OVF_I4_UN:
7353 case CEE_CONV_OVF_I8_UN:
7354 case CEE_CONV_OVF_U1_UN:
7355 case CEE_CONV_OVF_U2_UN:
7356 case CEE_CONV_OVF_U4_UN:
7357 case CEE_CONV_OVF_U8_UN:
7358 case CEE_CONV_OVF_I_UN:
7359 case CEE_CONV_OVF_U_UN:
7366 CHECK_CFG_EXCEPTION;
7370 case CEE_ADD_OVF_UN:
7372 case CEE_MUL_OVF_UN:
7374 case CEE_SUB_OVF_UN:
7382 token = read32 (ip + 1);
7383 klass = mini_get_class (method, token, generic_context);
7384 CHECK_TYPELOAD (klass);
7386 if (generic_class_is_reference_type (cfg, klass)) {
7387 MonoInst *store, *load;
7388 int dreg = alloc_preg (cfg);
7390 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7391 load->flags |= ins_flag;
7392 MONO_ADD_INS (cfg->cbb, load);
7394 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7395 store->flags |= ins_flag;
7396 MONO_ADD_INS (cfg->cbb, store);
7398 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7399 emit_write_barrier (cfg, sp [0], sp [1], -1);
7401 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7413 token = read32 (ip + 1);
7414 klass = mini_get_class (method, token, generic_context);
7415 CHECK_TYPELOAD (klass);
7417 /* Optimize the common ldobj+stloc combination */
7427 loc_index = ip [5] - CEE_STLOC_0;
7434 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7435 CHECK_LOCAL (loc_index);
7437 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7438 ins->dreg = cfg->locals [loc_index]->dreg;
7444 /* Optimize the ldobj+stobj combination */
7445 /* The reference case ends up being a load+store anyway */
7446 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7451 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7458 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7467 CHECK_STACK_OVF (1);
7469 n = read32 (ip + 1);
7471 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7472 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7473 ins->type = STACK_OBJ;
7476 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7477 MonoInst *iargs [1];
7479 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7480 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7482 if (cfg->opt & MONO_OPT_SHARED) {
7483 MonoInst *iargs [3];
7485 if (cfg->compile_aot) {
7486 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7488 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7489 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7490 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7491 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7492 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7494 if (bblock->out_of_line) {
7495 MonoInst *iargs [2];
7497 if (image == mono_defaults.corlib) {
7499 * Avoid relocations in AOT and save some space by using a
7500 * version of helper_ldstr specialized to mscorlib.
7502 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7503 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7505 /* Avoid creating the string object */
7506 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7507 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7508 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7512 if (cfg->compile_aot) {
7513 NEW_LDSTRCONST (cfg, ins, image, n);
7515 MONO_ADD_INS (bblock, ins);
7518 NEW_PCONST (cfg, ins, NULL);
7519 ins->type = STACK_OBJ;
7520 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7522 MONO_ADD_INS (bblock, ins);
7531 MonoInst *iargs [2];
7532 MonoMethodSignature *fsig;
7535 MonoInst *vtable_arg = NULL;
7538 token = read32 (ip + 1);
7539 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7540 if (!cmethod || mono_loader_get_last_error ())
7542 fsig = mono_method_get_signature (cmethod, image, token);
7546 mono_save_token_info (cfg, image, token, cmethod);
7548 if (!mono_class_init (cmethod->klass))
7551 if (cfg->generic_sharing_context)
7552 context_used = mono_method_check_context_used (cmethod);
7554 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7555 if (check_linkdemand (cfg, method, cmethod))
7557 CHECK_CFG_EXCEPTION;
7558 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7559 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7562 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7563 emit_generic_class_init (cfg, cmethod->klass);
7564 CHECK_TYPELOAD (cmethod->klass);
7567 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7568 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7569 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7570 mono_class_vtable (cfg->domain, cmethod->klass);
7571 CHECK_TYPELOAD (cmethod->klass);
7573 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7574 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7577 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7578 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7580 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7582 CHECK_TYPELOAD (cmethod->klass);
7583 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7588 n = fsig->param_count;
7592 * Generate smaller code for the common newobj <exception> instruction in
7593 * argument checking code.
7595 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7596 is_exception_class (cmethod->klass) && n <= 2 &&
7597 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7598 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7599 MonoInst *iargs [3];
7601 g_assert (!vtable_arg);
7605 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7608 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7612 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7617 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7620 g_assert_not_reached ();
7628 /* move the args to allow room for 'this' in the first position */
7634 /* check_call_signature () requires sp[0] to be set */
7635 this_ins.type = STACK_OBJ;
7637 if (check_call_signature (cfg, fsig, sp))
7642 if (mini_class_is_system_array (cmethod->klass)) {
7643 g_assert (!vtable_arg);
7645 *sp = emit_get_rgctx_method (cfg, context_used,
7646 cmethod, MONO_RGCTX_INFO_METHOD);
7648 /* Avoid varargs in the common case */
7649 if (fsig->param_count == 1)
7650 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7651 else if (fsig->param_count == 2)
7652 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7653 else if (fsig->param_count == 3)
7654 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7656 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7657 } else if (cmethod->string_ctor) {
7658 g_assert (!context_used);
7659 g_assert (!vtable_arg);
7660 /* we simply pass a null pointer */
7661 EMIT_NEW_PCONST (cfg, *sp, NULL);
7662 /* now call the string ctor */
7663 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7665 MonoInst* callvirt_this_arg = NULL;
7667 if (cmethod->klass->valuetype) {
7668 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7669 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7670 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7675 * The code generated by mini_emit_virtual_call () expects
7676 * iargs [0] to be a boxed instance, but luckily the vcall
7677 * will be transformed into a normal call there.
7679 } else if (context_used) {
7680 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7683 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7685 CHECK_TYPELOAD (cmethod->klass);
7688 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7689 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7690 * As a workaround, we call class cctors before allocating objects.
7692 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7693 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7694 if (cfg->verbose_level > 2)
7695 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7696 class_inits = g_slist_prepend (class_inits, vtable);
7699 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7702 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7705 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7707 /* Now call the actual ctor */
7708 /* Avoid virtual calls to ctors if possible */
7709 if (cmethod->klass->marshalbyref)
7710 callvirt_this_arg = sp [0];
7713 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7714 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7715 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7720 CHECK_CFG_EXCEPTION;
7725 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7726 mono_method_check_inlining (cfg, cmethod) &&
7727 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7728 !g_list_find (dont_inline, cmethod)) {
7731 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7732 cfg->real_offset += 5;
7735 inline_costs += costs - 5;
7738 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7740 } else if (context_used &&
7741 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7742 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7743 MonoInst *cmethod_addr;
7745 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7746 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7748 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7751 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7752 callvirt_this_arg, NULL, vtable_arg);
7756 if (alloc == NULL) {
7758 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7759 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7773 token = read32 (ip + 1);
7774 klass = mini_get_class (method, token, generic_context);
7775 CHECK_TYPELOAD (klass);
7776 if (sp [0]->type != STACK_OBJ)
7779 if (cfg->generic_sharing_context)
7780 context_used = mono_class_check_context_used (klass);
7782 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7789 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7791 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7795 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7796 MonoMethod *mono_castclass;
7797 MonoInst *iargs [1];
7800 mono_castclass = mono_marshal_get_castclass (klass);
7803 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7804 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7805 g_assert (costs > 0);
7808 cfg->real_offset += 5;
7813 inline_costs += costs;
7816 ins = handle_castclass (cfg, klass, *sp, context_used);
7817 CHECK_CFG_EXCEPTION;
7827 token = read32 (ip + 1);
7828 klass = mini_get_class (method, token, generic_context);
7829 CHECK_TYPELOAD (klass);
7830 if (sp [0]->type != STACK_OBJ)
7833 if (cfg->generic_sharing_context)
7834 context_used = mono_class_check_context_used (klass);
7836 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7843 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7845 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7849 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7850 MonoMethod *mono_isinst;
7851 MonoInst *iargs [1];
7854 mono_isinst = mono_marshal_get_isinst (klass);
7857 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7858 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7859 g_assert (costs > 0);
7862 cfg->real_offset += 5;
7867 inline_costs += costs;
7870 ins = handle_isinst (cfg, klass, *sp, context_used);
7871 CHECK_CFG_EXCEPTION;
7878 case CEE_UNBOX_ANY: {
7882 token = read32 (ip + 1);
7883 klass = mini_get_class (method, token, generic_context);
7884 CHECK_TYPELOAD (klass);
7886 mono_save_token_info (cfg, image, token, klass);
7888 if (cfg->generic_sharing_context)
7889 context_used = mono_class_check_context_used (klass);
7891 if (generic_class_is_reference_type (cfg, klass)) {
7892 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7893 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7894 MonoMethod *mono_castclass;
7895 MonoInst *iargs [1];
7898 mono_castclass = mono_marshal_get_castclass (klass);
7901 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7902 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7904 g_assert (costs > 0);
7907 cfg->real_offset += 5;
7911 inline_costs += costs;
7913 ins = handle_castclass (cfg, klass, *sp, context_used);
7914 CHECK_CFG_EXCEPTION;
7922 if (mono_class_is_nullable (klass)) {
7923 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7930 ins = handle_unbox (cfg, klass, sp, context_used);
7936 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7949 token = read32 (ip + 1);
7950 klass = mini_get_class (method, token, generic_context);
7951 CHECK_TYPELOAD (klass);
7953 mono_save_token_info (cfg, image, token, klass);
7955 if (cfg->generic_sharing_context)
7956 context_used = mono_class_check_context_used (klass);
7958 if (generic_class_is_reference_type (cfg, klass)) {
7964 if (klass == mono_defaults.void_class)
7966 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7968 /* frequent check in generic code: box (struct), brtrue */
7970 // FIXME: LLVM can't handle the inconsistent bb linking
7971 if (!mono_class_is_nullable (klass) &&
7972 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
7973 (ip [5] == CEE_BRTRUE ||
7974 ip [5] == CEE_BRTRUE_S ||
7975 ip [5] == CEE_BRFALSE ||
7976 ip [5] == CEE_BRFALSE_S)) {
7977 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
7979 MonoBasicBlock *true_bb, *false_bb;
7983 if (cfg->verbose_level > 3) {
7984 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7985 printf ("<box+brtrue opt>\n");
7993 target = ip + 1 + (signed char)(*ip);
8000 target = ip + 4 + (gint)(read32 (ip));
8004 g_assert_not_reached ();
8008 * We need to link both bblocks, since it is needed for handling stack
8009 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8010 * Branching to only one of them would lead to inconsistencies, so
8011 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8013 GET_BBLOCK (cfg, true_bb, target);
8014 GET_BBLOCK (cfg, false_bb, ip);
8016 mono_link_bblock (cfg, cfg->cbb, true_bb);
8017 mono_link_bblock (cfg, cfg->cbb, false_bb);
8019 if (sp != stack_start) {
8020 handle_stack_args (cfg, stack_start, sp - stack_start);
8022 CHECK_UNVERIFIABLE (cfg);
8025 if (COMPILE_LLVM (cfg)) {
8026 dreg = alloc_ireg (cfg);
8027 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8030 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8032 /* The JIT can't eliminate the iconst+compare */
8033 MONO_INST_NEW (cfg, ins, OP_BR);
8034 ins->inst_target_bb = is_true ? true_bb : false_bb;
8035 MONO_ADD_INS (cfg->cbb, ins);
8038 start_new_bblock = 1;
8042 *sp++ = handle_box (cfg, val, klass, context_used);
8044 CHECK_CFG_EXCEPTION;
8053 token = read32 (ip + 1);
8054 klass = mini_get_class (method, token, generic_context);
8055 CHECK_TYPELOAD (klass);
8057 mono_save_token_info (cfg, image, token, klass);
8059 if (cfg->generic_sharing_context)
8060 context_used = mono_class_check_context_used (klass);
8062 if (mono_class_is_nullable (klass)) {
8065 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8066 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8070 ins = handle_unbox (cfg, klass, sp, context_used);
8080 MonoClassField *field;
8084 if (*ip == CEE_STFLD) {
8091 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8093 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8096 token = read32 (ip + 1);
8097 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8098 field = mono_method_get_wrapper_data (method, token);
8099 klass = field->parent;
8102 field = mono_field_from_token (image, token, &klass, generic_context);
8106 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8107 FIELD_ACCESS_FAILURE;
8108 mono_class_init (klass);
8110 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8111 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8112 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8113 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8116 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8117 if (*ip == CEE_STFLD) {
8118 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8120 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8121 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8122 MonoInst *iargs [5];
8125 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8126 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8127 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8131 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8132 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8133 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8134 g_assert (costs > 0);
8136 cfg->real_offset += 5;
8139 inline_costs += costs;
8141 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8146 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8148 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8149 if (sp [0]->opcode != OP_LDADDR)
8150 store->flags |= MONO_INST_FAULT;
8152 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8153 /* insert call to write barrier */
8157 dreg = alloc_preg (cfg);
8158 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8159 emit_write_barrier (cfg, ptr, sp [1], -1);
8162 store->flags |= ins_flag;
8169 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8170 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8171 MonoInst *iargs [4];
8174 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8175 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8176 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8177 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8178 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8179 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8181 g_assert (costs > 0);
8183 cfg->real_offset += 5;
8187 inline_costs += costs;
8189 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8193 if (sp [0]->type == STACK_VTYPE) {
8196 /* Have to compute the address of the variable */
8198 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8200 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8202 g_assert (var->klass == klass);
8204 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8208 if (*ip == CEE_LDFLDA) {
8209 if (sp [0]->type == STACK_OBJ) {
8210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8211 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8214 dreg = alloc_preg (cfg);
8216 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8217 ins->klass = mono_class_from_mono_type (field->type);
8218 ins->type = STACK_MP;
8223 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8225 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8226 load->flags |= ins_flag;
8227 if (sp [0]->opcode != OP_LDADDR)
8228 load->flags |= MONO_INST_FAULT;
8239 MonoClassField *field;
8240 gpointer addr = NULL;
8241 gboolean is_special_static;
8244 token = read32 (ip + 1);
8246 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8247 field = mono_method_get_wrapper_data (method, token);
8248 klass = field->parent;
8251 field = mono_field_from_token (image, token, &klass, generic_context);
8254 mono_class_init (klass);
8255 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8256 FIELD_ACCESS_FAILURE;
8258 /* if the class is Critical then transparent code cannot access it's fields */
8259 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8260 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8263 * We can only support shared generic static
8264 * field access on architectures where the
8265 * trampoline code has been extended to handle
8266 * the generic class init.
8268 #ifndef MONO_ARCH_VTABLE_REG
8269 GENERIC_SHARING_FAILURE (*ip);
8272 if (cfg->generic_sharing_context)
8273 context_used = mono_class_check_context_used (klass);
8275 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8277 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8278 * to be called here.
8280 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8281 mono_class_vtable (cfg->domain, klass);
8282 CHECK_TYPELOAD (klass);
8284 mono_domain_lock (cfg->domain);
8285 if (cfg->domain->special_static_fields)
8286 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8287 mono_domain_unlock (cfg->domain);
8289 is_special_static = mono_class_field_is_special_static (field);
8291 /* Generate IR to compute the field address */
8292 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8294 * Fast access to TLS data
8295 * Inline version of get_thread_static_data () in
8299 int idx, static_data_reg, array_reg, dreg;
8300 MonoInst *thread_ins;
8302 // offset &= 0x7fffffff;
8303 // idx = (offset >> 24) - 1;
8304 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8306 thread_ins = mono_get_thread_intrinsic (cfg);
8307 MONO_ADD_INS (cfg->cbb, thread_ins);
8308 static_data_reg = alloc_ireg (cfg);
8309 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8311 if (cfg->compile_aot) {
8312 int offset_reg, offset2_reg, idx_reg;
8314 /* For TLS variables, this will return the TLS offset */
8315 EMIT_NEW_SFLDACONST (cfg, ins, field);
8316 offset_reg = ins->dreg;
8317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8318 idx_reg = alloc_ireg (cfg);
8319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8321 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8322 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8323 array_reg = alloc_ireg (cfg);
8324 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8325 offset2_reg = alloc_ireg (cfg);
8326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8327 dreg = alloc_ireg (cfg);
8328 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8330 offset = (gsize)addr & 0x7fffffff;
8331 idx = (offset >> 24) - 1;
8333 array_reg = alloc_ireg (cfg);
8334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8335 dreg = alloc_ireg (cfg);
8336 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8338 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8339 (cfg->compile_aot && is_special_static) ||
8340 (context_used && is_special_static)) {
8341 MonoInst *iargs [2];
8343 g_assert (field->parent);
8344 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8346 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8347 field, MONO_RGCTX_INFO_CLASS_FIELD);
8349 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8351 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8352 } else if (context_used) {
8353 MonoInst *static_data;
8356 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8357 method->klass->name_space, method->klass->name, method->name,
8358 depth, field->offset);
8361 if (mono_class_needs_cctor_run (klass, method))
8362 emit_generic_class_init (cfg, klass);
8365 * The pointer we're computing here is
8367 * super_info.static_data + field->offset
8369 static_data = emit_get_rgctx_klass (cfg, context_used,
8370 klass, MONO_RGCTX_INFO_STATIC_DATA);
8372 if (field->offset == 0) {
8375 int addr_reg = mono_alloc_preg (cfg);
8376 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8378 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8379 MonoInst *iargs [2];
8381 g_assert (field->parent);
8382 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8383 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8384 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8386 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8388 CHECK_TYPELOAD (klass);
8390 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8391 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8392 if (cfg->verbose_level > 2)
8393 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8394 class_inits = g_slist_prepend (class_inits, vtable);
8396 if (cfg->run_cctors) {
8398 /* This makes it so that inlining cannot trigger */
8399 /* .cctors: too many apps depend on them */
8400 /* running with a specific order... */
8401 if (! vtable->initialized)
8403 ex = mono_runtime_class_init_full (vtable, FALSE);
8405 set_exception_object (cfg, ex);
8406 goto exception_exit;
8410 addr = (char*)vtable->data + field->offset;
8412 if (cfg->compile_aot)
8413 EMIT_NEW_SFLDACONST (cfg, ins, field);
8415 EMIT_NEW_PCONST (cfg, ins, addr);
8417 MonoInst *iargs [1];
8418 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8419 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8423 /* Generate IR to do the actual load/store operation */
8425 if (*ip == CEE_LDSFLDA) {
8426 ins->klass = mono_class_from_mono_type (field->type);
8427 ins->type = STACK_PTR;
8429 } else if (*ip == CEE_STSFLD) {
8434 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8435 store->flags |= ins_flag;
8437 gboolean is_const = FALSE;
8438 MonoVTable *vtable = NULL;
8440 if (!context_used) {
8441 vtable = mono_class_vtable (cfg->domain, klass);
8442 CHECK_TYPELOAD (klass);
8444 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8445 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8446 gpointer addr = (char*)vtable->data + field->offset;
8447 int ro_type = field->type->type;
8448 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8449 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8451 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8454 case MONO_TYPE_BOOLEAN:
8456 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8460 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8463 case MONO_TYPE_CHAR:
8465 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8469 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8474 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8478 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8484 case MONO_TYPE_FNPTR:
8485 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8486 type_to_eval_stack_type ((cfg), field->type, *sp);
8489 case MONO_TYPE_STRING:
8490 case MONO_TYPE_OBJECT:
8491 case MONO_TYPE_CLASS:
8492 case MONO_TYPE_SZARRAY:
8493 case MONO_TYPE_ARRAY:
8494 if (!mono_gc_is_moving ()) {
8495 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8496 type_to_eval_stack_type ((cfg), field->type, *sp);
8504 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8509 case MONO_TYPE_VALUETYPE:
8519 CHECK_STACK_OVF (1);
8521 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8522 load->flags |= ins_flag;
8535 token = read32 (ip + 1);
8536 klass = mini_get_class (method, token, generic_context);
8537 CHECK_TYPELOAD (klass);
8538 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8539 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8540 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8541 generic_class_is_reference_type (cfg, klass)) {
8542 /* insert call to write barrier */
8543 emit_write_barrier (cfg, sp [0], sp [1], -1);
8555 const char *data_ptr;
8557 guint32 field_token;
8563 token = read32 (ip + 1);
8565 klass = mini_get_class (method, token, generic_context);
8566 CHECK_TYPELOAD (klass);
8568 if (cfg->generic_sharing_context)
8569 context_used = mono_class_check_context_used (klass);
8571 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8572 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8573 ins->sreg1 = sp [0]->dreg;
8574 ins->type = STACK_I4;
8575 ins->dreg = alloc_ireg (cfg);
8576 MONO_ADD_INS (cfg->cbb, ins);
8577 *sp = mono_decompose_opcode (cfg, ins);
8582 MonoClass *array_class = mono_array_class_get (klass, 1);
8583 /* FIXME: we cannot get a managed
8584 allocator because we can't get the
8585 open generic class's vtable. We
8586 have the same problem in
8587 handle_alloc(). This
8588 needs to be solved so that we can
8589 have managed allocs of shared
8592 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8593 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8595 MonoMethod *managed_alloc = NULL;
8597 /* FIXME: Decompose later to help abcrem */
8600 args [0] = emit_get_rgctx_klass (cfg, context_used,
8601 array_class, MONO_RGCTX_INFO_VTABLE);
8606 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8608 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8610 if (cfg->opt & MONO_OPT_SHARED) {
8611 /* Decompose now to avoid problems with references to the domainvar */
8612 MonoInst *iargs [3];
8614 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8615 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8618 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8620 /* Decompose later since it is needed by abcrem */
8621 MonoClass *array_type = mono_array_class_get (klass, 1);
8622 mono_class_vtable (cfg->domain, array_type);
8623 CHECK_TYPELOAD (array_type);
8625 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8626 ins->dreg = alloc_preg (cfg);
8627 ins->sreg1 = sp [0]->dreg;
8628 ins->inst_newa_class = klass;
8629 ins->type = STACK_OBJ;
8631 MONO_ADD_INS (cfg->cbb, ins);
8632 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8633 cfg->cbb->has_array_access = TRUE;
8635 /* Needed so mono_emit_load_get_addr () gets called */
8636 mono_get_got_var (cfg);
8646 * we inline/optimize the initialization sequence if possible.
8647 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8648 * for small sizes open code the memcpy
8649 * ensure the rva field is big enough
8651 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8652 MonoMethod *memcpy_method = get_memcpy_method ();
8653 MonoInst *iargs [3];
8654 int add_reg = alloc_preg (cfg);
8656 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8657 if (cfg->compile_aot) {
8658 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8660 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8662 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8663 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8672 if (sp [0]->type != STACK_OBJ)
8675 dreg = alloc_preg (cfg);
8676 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8677 ins->dreg = alloc_preg (cfg);
8678 ins->sreg1 = sp [0]->dreg;
8679 ins->type = STACK_I4;
8680 /* This flag will be inherited by the decomposition */
8681 ins->flags |= MONO_INST_FAULT;
8682 MONO_ADD_INS (cfg->cbb, ins);
8683 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8684 cfg->cbb->has_array_access = TRUE;
8692 if (sp [0]->type != STACK_OBJ)
8695 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8697 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8698 CHECK_TYPELOAD (klass);
8699 /* we need to make sure that this array is exactly the type it needs
8700 * to be for correctness. the wrappers are lax with their usage
8701 * so we need to ignore them here
8703 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8704 MonoClass *array_class = mono_array_class_get (klass, 1);
8705 mini_emit_check_array_type (cfg, sp [0], array_class);
8706 CHECK_TYPELOAD (array_class);
8710 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8725 case CEE_LDELEM_REF: {
8731 if (*ip == CEE_LDELEM) {
8733 token = read32 (ip + 1);
8734 klass = mini_get_class (method, token, generic_context);
8735 CHECK_TYPELOAD (klass);
8736 mono_class_init (klass);
8739 klass = array_access_to_klass (*ip);
8741 if (sp [0]->type != STACK_OBJ)
8744 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8746 if (sp [1]->opcode == OP_ICONST) {
8747 int array_reg = sp [0]->dreg;
8748 int index_reg = sp [1]->dreg;
8749 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8751 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8752 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8754 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8755 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8758 if (*ip == CEE_LDELEM)
8771 case CEE_STELEM_REF:
8778 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8780 if (*ip == CEE_STELEM) {
8782 token = read32 (ip + 1);
8783 klass = mini_get_class (method, token, generic_context);
8784 CHECK_TYPELOAD (klass);
8785 mono_class_init (klass);
8788 klass = array_access_to_klass (*ip);
8790 if (sp [0]->type != STACK_OBJ)
8793 /* storing a NULL doesn't need any of the complex checks in stelemref */
8794 if (generic_class_is_reference_type (cfg, klass) &&
8795 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8796 MonoMethod* helper = mono_marshal_get_stelemref ();
8797 MonoInst *iargs [3];
8799 if (sp [0]->type != STACK_OBJ)
8801 if (sp [2]->type != STACK_OBJ)
8808 mono_emit_method_call (cfg, helper, iargs, NULL);
8810 if (sp [1]->opcode == OP_ICONST) {
8811 int array_reg = sp [0]->dreg;
8812 int index_reg = sp [1]->dreg;
8813 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8815 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8816 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8818 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8819 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8823 if (*ip == CEE_STELEM)
8830 case CEE_CKFINITE: {
8834 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8835 ins->sreg1 = sp [0]->dreg;
8836 ins->dreg = alloc_freg (cfg);
8837 ins->type = STACK_R8;
8838 MONO_ADD_INS (bblock, ins);
8840 *sp++ = mono_decompose_opcode (cfg, ins);
8845 case CEE_REFANYVAL: {
8846 MonoInst *src_var, *src;
8848 int klass_reg = alloc_preg (cfg);
8849 int dreg = alloc_preg (cfg);
8852 MONO_INST_NEW (cfg, ins, *ip);
8855 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8856 CHECK_TYPELOAD (klass);
8857 mono_class_init (klass);
8859 if (cfg->generic_sharing_context)
8860 context_used = mono_class_check_context_used (klass);
8863 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8865 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8866 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8870 MonoInst *klass_ins;
8872 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8873 klass, MONO_RGCTX_INFO_KLASS);
8876 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8877 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8879 mini_emit_class_check (cfg, klass_reg, klass);
8881 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8882 ins->type = STACK_MP;
8887 case CEE_MKREFANY: {
8888 MonoInst *loc, *addr;
8891 MONO_INST_NEW (cfg, ins, *ip);
8894 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8895 CHECK_TYPELOAD (klass);
8896 mono_class_init (klass);
8898 if (cfg->generic_sharing_context)
8899 context_used = mono_class_check_context_used (klass);
8901 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8902 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8905 MonoInst *const_ins;
8906 int type_reg = alloc_preg (cfg);
8908 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8912 } else if (cfg->compile_aot) {
8913 int const_reg = alloc_preg (cfg);
8914 int type_reg = alloc_preg (cfg);
8916 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8921 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8922 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8924 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8926 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8927 ins->type = STACK_VTYPE;
8928 ins->klass = mono_defaults.typed_reference_class;
8935 MonoClass *handle_class;
8937 CHECK_STACK_OVF (1);
8940 n = read32 (ip + 1);
8942 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8943 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8944 handle = mono_method_get_wrapper_data (method, n);
8945 handle_class = mono_method_get_wrapper_data (method, n + 1);
8946 if (handle_class == mono_defaults.typehandle_class)
8947 handle = &((MonoClass*)handle)->byval_arg;
8950 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8954 mono_class_init (handle_class);
8955 if (cfg->generic_sharing_context) {
8956 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8957 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8958 /* This case handles ldtoken
8959 of an open type, like for
8962 } else if (handle_class == mono_defaults.typehandle_class) {
8963 /* If we get a MONO_TYPE_CLASS
8964 then we need to provide the
8966 instantiation of it. */
8967 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8970 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8971 } else if (handle_class == mono_defaults.fieldhandle_class)
8972 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8973 else if (handle_class == mono_defaults.methodhandle_class)
8974 context_used = mono_method_check_context_used (handle);
8976 g_assert_not_reached ();
8979 if ((cfg->opt & MONO_OPT_SHARED) &&
8980 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8981 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8982 MonoInst *addr, *vtvar, *iargs [3];
8983 int method_context_used;
8985 if (cfg->generic_sharing_context)
8986 method_context_used = mono_method_check_context_used (method);
8988 method_context_used = 0;
8990 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8992 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8993 EMIT_NEW_ICONST (cfg, iargs [1], n);
8994 if (method_context_used) {
8995 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8996 method, MONO_RGCTX_INFO_METHOD);
8997 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8999 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9000 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9002 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9006 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9008 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9009 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9010 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9011 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9012 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9013 MonoClass *tclass = mono_class_from_mono_type (handle);
9015 mono_class_init (tclass);
9017 ins = emit_get_rgctx_klass (cfg, context_used,
9018 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9019 } else if (cfg->compile_aot) {
9020 if (method->wrapper_type) {
9021 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9022 /* Special case for static synchronized wrappers */
9023 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9025 /* FIXME: n is not a normal token */
9026 cfg->disable_aot = TRUE;
9027 EMIT_NEW_PCONST (cfg, ins, NULL);
9030 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9033 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9035 ins->type = STACK_OBJ;
9036 ins->klass = cmethod->klass;
9039 MonoInst *addr, *vtvar;
9041 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9044 if (handle_class == mono_defaults.typehandle_class) {
9045 ins = emit_get_rgctx_klass (cfg, context_used,
9046 mono_class_from_mono_type (handle),
9047 MONO_RGCTX_INFO_TYPE);
9048 } else if (handle_class == mono_defaults.methodhandle_class) {
9049 ins = emit_get_rgctx_method (cfg, context_used,
9050 handle, MONO_RGCTX_INFO_METHOD);
9051 } else if (handle_class == mono_defaults.fieldhandle_class) {
9052 ins = emit_get_rgctx_field (cfg, context_used,
9053 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9055 g_assert_not_reached ();
9057 } else if (cfg->compile_aot) {
9058 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9060 EMIT_NEW_PCONST (cfg, ins, handle);
9062 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9063 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9064 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9074 MONO_INST_NEW (cfg, ins, OP_THROW);
9076 ins->sreg1 = sp [0]->dreg;
9078 bblock->out_of_line = TRUE;
9079 MONO_ADD_INS (bblock, ins);
9080 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9081 MONO_ADD_INS (bblock, ins);
9084 link_bblock (cfg, bblock, end_bblock);
9085 start_new_bblock = 1;
9087 case CEE_ENDFINALLY:
9088 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9089 MONO_ADD_INS (bblock, ins);
9091 start_new_bblock = 1;
9094 * Control will leave the method so empty the stack, otherwise
9095 * the next basic block will start with a nonempty stack.
9097 while (sp != stack_start) {
9105 if (*ip == CEE_LEAVE) {
9107 target = ip + 5 + (gint32)read32(ip + 1);
9110 target = ip + 2 + (signed char)(ip [1]);
9113 /* empty the stack */
9114 while (sp != stack_start) {
9119 * If this leave statement is in a catch block, check for a
9120 * pending exception, and rethrow it if necessary.
9121 * We avoid doing this in runtime invoke wrappers, since those are called
9122 * by native code which expects the wrapper to catch all exceptions.
9124 for (i = 0; i < header->num_clauses; ++i) {
9125 MonoExceptionClause *clause = &header->clauses [i];
9128 * Use <= in the final comparison to handle clauses with multiple
9129 * leave statements, like in bug #78024.
9130 * The ordering of the exception clauses guarantees that we find the
9133 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9135 MonoBasicBlock *dont_throw;
9140 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9143 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9145 NEW_BBLOCK (cfg, dont_throw);
9148 * Currently, we always rethrow the abort exception, despite the
9149 * fact that this is not correct. See thread6.cs for an example.
9150 * But propagating the abort exception is more important than
9151 * getting the semantics right.
9153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9155 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9157 MONO_START_BB (cfg, dont_throw);
9162 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9164 MonoExceptionClause *clause;
9166 for (tmp = handlers; tmp; tmp = tmp->next) {
9168 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9170 link_bblock (cfg, bblock, tblock);
9171 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9172 ins->inst_target_bb = tblock;
9173 ins->inst_eh_block = clause;
9174 MONO_ADD_INS (bblock, ins);
9175 bblock->has_call_handler = 1;
9176 if (COMPILE_LLVM (cfg)) {
9177 MonoBasicBlock *target_bb;
9180 * Link the finally bblock with the target, since it will
9181 * conceptually branch there.
9182 * FIXME: Have to link the bblock containing the endfinally.
9184 GET_BBLOCK (cfg, target_bb, target);
9185 link_bblock (cfg, tblock, target_bb);
9188 g_list_free (handlers);
9191 MONO_INST_NEW (cfg, ins, OP_BR);
9192 MONO_ADD_INS (bblock, ins);
9193 GET_BBLOCK (cfg, tblock, target);
9194 link_bblock (cfg, bblock, tblock);
9195 ins->inst_target_bb = tblock;
9196 start_new_bblock = 1;
9198 if (*ip == CEE_LEAVE)
9207 * Mono specific opcodes
9209 case MONO_CUSTOM_PREFIX: {
9211 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9215 case CEE_MONO_ICALL: {
9217 MonoJitICallInfo *info;
9219 token = read32 (ip + 2);
9220 func = mono_method_get_wrapper_data (method, token);
9221 info = mono_find_jit_icall_by_addr (func);
9224 CHECK_STACK (info->sig->param_count);
9225 sp -= info->sig->param_count;
9227 ins = mono_emit_jit_icall (cfg, info->func, sp);
9228 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9232 inline_costs += 10 * num_calls++;
9236 case CEE_MONO_LDPTR: {
9239 CHECK_STACK_OVF (1);
9241 token = read32 (ip + 2);
9243 ptr = mono_method_get_wrapper_data (method, token);
9244 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9245 MonoJitICallInfo *callinfo;
9246 const char *icall_name;
9248 icall_name = method->name + strlen ("__icall_wrapper_");
9249 g_assert (icall_name);
9250 callinfo = mono_find_jit_icall_by_name (icall_name);
9251 g_assert (callinfo);
9253 if (ptr == callinfo->func) {
9254 /* Will be transformed into an AOTCONST later */
9255 EMIT_NEW_PCONST (cfg, ins, ptr);
9261 /* FIXME: Generalize this */
9262 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9263 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9268 EMIT_NEW_PCONST (cfg, ins, ptr);
9271 inline_costs += 10 * num_calls++;
9272 /* Can't embed random pointers into AOT code */
9273 cfg->disable_aot = 1;
9276 case CEE_MONO_ICALL_ADDR: {
9277 MonoMethod *cmethod;
9280 CHECK_STACK_OVF (1);
9282 token = read32 (ip + 2);
9284 cmethod = mono_method_get_wrapper_data (method, token);
9286 if (cfg->compile_aot) {
9287 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9289 ptr = mono_lookup_internal_call (cmethod);
9291 EMIT_NEW_PCONST (cfg, ins, ptr);
9297 case CEE_MONO_VTADDR: {
9298 MonoInst *src_var, *src;
9304 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9305 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9310 case CEE_MONO_NEWOBJ: {
9311 MonoInst *iargs [2];
9313 CHECK_STACK_OVF (1);
9315 token = read32 (ip + 2);
9316 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9317 mono_class_init (klass);
9318 NEW_DOMAINCONST (cfg, iargs [0]);
9319 MONO_ADD_INS (cfg->cbb, iargs [0]);
9320 NEW_CLASSCONST (cfg, iargs [1], klass);
9321 MONO_ADD_INS (cfg->cbb, iargs [1]);
9322 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9324 inline_costs += 10 * num_calls++;
9327 case CEE_MONO_OBJADDR:
9330 MONO_INST_NEW (cfg, ins, OP_MOVE);
9331 ins->dreg = alloc_preg (cfg);
9332 ins->sreg1 = sp [0]->dreg;
9333 ins->type = STACK_MP;
9334 MONO_ADD_INS (cfg->cbb, ins);
9338 case CEE_MONO_LDNATIVEOBJ:
9340 * Similar to LDOBJ, but instead load the unmanaged
9341 * representation of the vtype to the stack.
9346 token = read32 (ip + 2);
9347 klass = mono_method_get_wrapper_data (method, token);
9348 g_assert (klass->valuetype);
9349 mono_class_init (klass);
9352 MonoInst *src, *dest, *temp;
9355 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9356 temp->backend.is_pinvoke = 1;
9357 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9358 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9360 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9361 dest->type = STACK_VTYPE;
9362 dest->klass = klass;
9368 case CEE_MONO_RETOBJ: {
9370 * Same as RET, but return the native representation of a vtype
9373 g_assert (cfg->ret);
9374 g_assert (mono_method_signature (method)->pinvoke);
9379 token = read32 (ip + 2);
9380 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9382 if (!cfg->vret_addr) {
9383 g_assert (cfg->ret_var_is_local);
9385 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9387 EMIT_NEW_RETLOADA (cfg, ins);
9389 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9391 if (sp != stack_start)
9394 MONO_INST_NEW (cfg, ins, OP_BR);
9395 ins->inst_target_bb = end_bblock;
9396 MONO_ADD_INS (bblock, ins);
9397 link_bblock (cfg, bblock, end_bblock);
9398 start_new_bblock = 1;
9402 case CEE_MONO_CISINST:
9403 case CEE_MONO_CCASTCLASS: {
9408 token = read32 (ip + 2);
9409 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9410 if (ip [1] == CEE_MONO_CISINST)
9411 ins = handle_cisinst (cfg, klass, sp [0]);
9413 ins = handle_ccastclass (cfg, klass, sp [0]);
9419 case CEE_MONO_SAVE_LMF:
9420 case CEE_MONO_RESTORE_LMF:
9421 #ifdef MONO_ARCH_HAVE_LMF_OPS
9422 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9423 MONO_ADD_INS (bblock, ins);
9424 cfg->need_lmf_area = TRUE;
9428 case CEE_MONO_CLASSCONST:
9429 CHECK_STACK_OVF (1);
9431 token = read32 (ip + 2);
9432 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9435 inline_costs += 10 * num_calls++;
9437 case CEE_MONO_NOT_TAKEN:
9438 bblock->out_of_line = TRUE;
9442 CHECK_STACK_OVF (1);
9444 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9445 ins->dreg = alloc_preg (cfg);
9446 ins->inst_offset = (gint32)read32 (ip + 2);
9447 ins->type = STACK_PTR;
9448 MONO_ADD_INS (bblock, ins);
9452 case CEE_MONO_DYN_CALL: {
9455 /* It would be easier to call a trampoline, but that would put an
9456 * extra frame on the stack, confusing exception handling. So
9457 * implement it inline using an opcode for now.
9460 if (!cfg->dyn_call_var) {
9461 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9462 /* prevent it from being register allocated */
9463 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9466 /* Has to use a call inst since it local regalloc expects it */
9467 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9468 ins = (MonoInst*)call;
9470 ins->sreg1 = sp [0]->dreg;
9471 ins->sreg2 = sp [1]->dreg;
9472 MONO_ADD_INS (bblock, ins);
9474 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9475 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9479 inline_costs += 10 * num_calls++;
9484 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9494 /* somewhat similar to LDTOKEN */
9495 MonoInst *addr, *vtvar;
9496 CHECK_STACK_OVF (1);
9497 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9499 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9500 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9502 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9503 ins->type = STACK_VTYPE;
9504 ins->klass = mono_defaults.argumenthandle_class;
9517 * The following transforms:
9518 * CEE_CEQ into OP_CEQ
9519 * CEE_CGT into OP_CGT
9520 * CEE_CGT_UN into OP_CGT_UN
9521 * CEE_CLT into OP_CLT
9522 * CEE_CLT_UN into OP_CLT_UN
9524 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9526 MONO_INST_NEW (cfg, ins, cmp->opcode);
9528 cmp->sreg1 = sp [0]->dreg;
9529 cmp->sreg2 = sp [1]->dreg;
9530 type_from_op (cmp, sp [0], sp [1]);
9532 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9533 cmp->opcode = OP_LCOMPARE;
9534 else if (sp [0]->type == STACK_R8)
9535 cmp->opcode = OP_FCOMPARE;
9537 cmp->opcode = OP_ICOMPARE;
9538 MONO_ADD_INS (bblock, cmp);
9539 ins->type = STACK_I4;
9540 ins->dreg = alloc_dreg (cfg, ins->type);
9541 type_from_op (ins, sp [0], sp [1]);
9543 if (cmp->opcode == OP_FCOMPARE) {
9545 * The backends expect the fceq opcodes to do the
9548 cmp->opcode = OP_NOP;
9549 ins->sreg1 = cmp->sreg1;
9550 ins->sreg2 = cmp->sreg2;
9552 MONO_ADD_INS (bblock, ins);
9559 MonoMethod *cil_method;
9560 gboolean needs_static_rgctx_invoke;
9562 CHECK_STACK_OVF (1);
9564 n = read32 (ip + 2);
9565 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9566 if (!cmethod || mono_loader_get_last_error ())
9568 mono_class_init (cmethod->klass);
9570 mono_save_token_info (cfg, image, n, cmethod);
9572 if (cfg->generic_sharing_context)
9573 context_used = mono_method_check_context_used (cmethod);
9575 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9577 cil_method = cmethod;
9578 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9579 METHOD_ACCESS_FAILURE;
9581 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9582 if (check_linkdemand (cfg, method, cmethod))
9584 CHECK_CFG_EXCEPTION;
9585 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9586 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9590 * Optimize the common case of ldftn+delegate creation
9592 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9593 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9594 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9596 int invoke_context_used = 0;
9598 invoke = mono_get_delegate_invoke (ctor_method->klass);
9599 if (!invoke || !mono_method_signature (invoke))
9602 if (cfg->generic_sharing_context)
9603 invoke_context_used = mono_method_check_context_used (invoke);
9605 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9606 /* FIXME: SGEN support */
9607 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9608 MonoInst *target_ins;
9611 if (cfg->verbose_level > 3)
9612 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9613 target_ins = sp [-1];
9615 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9616 CHECK_CFG_EXCEPTION;
9625 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9626 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9630 inline_costs += 10 * num_calls++;
9633 case CEE_LDVIRTFTN: {
9638 n = read32 (ip + 2);
9639 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9640 if (!cmethod || mono_loader_get_last_error ())
9642 mono_class_init (cmethod->klass);
9644 if (cfg->generic_sharing_context)
9645 context_used = mono_method_check_context_used (cmethod);
9647 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9648 if (check_linkdemand (cfg, method, cmethod))
9650 CHECK_CFG_EXCEPTION;
9651 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9652 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9658 args [1] = emit_get_rgctx_method (cfg, context_used,
9659 cmethod, MONO_RGCTX_INFO_METHOD);
9662 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9664 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9667 inline_costs += 10 * num_calls++;
9671 CHECK_STACK_OVF (1);
9673 n = read16 (ip + 2);
9675 EMIT_NEW_ARGLOAD (cfg, ins, n);
9680 CHECK_STACK_OVF (1);
9682 n = read16 (ip + 2);
9684 NEW_ARGLOADA (cfg, ins, n);
9685 MONO_ADD_INS (cfg->cbb, ins);
9693 n = read16 (ip + 2);
9695 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9697 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9701 CHECK_STACK_OVF (1);
9703 n = read16 (ip + 2);
9705 EMIT_NEW_LOCLOAD (cfg, ins, n);
9710 unsigned char *tmp_ip;
9711 CHECK_STACK_OVF (1);
9713 n = read16 (ip + 2);
9716 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9722 EMIT_NEW_LOCLOADA (cfg, ins, n);
9731 n = read16 (ip + 2);
9733 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9735 emit_stloc_ir (cfg, sp, header, n);
9742 if (sp != stack_start)
9744 if (cfg->method != method)
9746 * Inlining this into a loop in a parent could lead to
9747 * stack overflows which is different behavior than the
9748 * non-inlined case, thus disable inlining in this case.
9750 goto inline_failure;
9752 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9753 ins->dreg = alloc_preg (cfg);
9754 ins->sreg1 = sp [0]->dreg;
9755 ins->type = STACK_PTR;
9756 MONO_ADD_INS (cfg->cbb, ins);
9758 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9760 ins->flags |= MONO_INST_INIT;
9765 case CEE_ENDFILTER: {
9766 MonoExceptionClause *clause, *nearest;
9767 int cc, nearest_num;
9771 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9773 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9774 ins->sreg1 = (*sp)->dreg;
9775 MONO_ADD_INS (bblock, ins);
9776 start_new_bblock = 1;
9781 for (cc = 0; cc < header->num_clauses; ++cc) {
9782 clause = &header->clauses [cc];
9783 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9784 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9785 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9791 if ((ip - header->code) != nearest->handler_offset)
9796 case CEE_UNALIGNED_:
9797 ins_flag |= MONO_INST_UNALIGNED;
9798 /* FIXME: record alignment? we can assume 1 for now */
9803 ins_flag |= MONO_INST_VOLATILE;
9807 ins_flag |= MONO_INST_TAILCALL;
9808 cfg->flags |= MONO_CFG_HAS_TAIL;
9809 /* Can't inline tail calls at this time */
9810 inline_costs += 100000;
9817 token = read32 (ip + 2);
9818 klass = mini_get_class (method, token, generic_context);
9819 CHECK_TYPELOAD (klass);
9820 if (generic_class_is_reference_type (cfg, klass))
9821 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9823 mini_emit_initobj (cfg, *sp, NULL, klass);
9827 case CEE_CONSTRAINED_:
9829 token = read32 (ip + 2);
9830 if (method->wrapper_type != MONO_WRAPPER_NONE)
9831 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9833 constrained_call = mono_class_get_full (image, token, generic_context);
9834 CHECK_TYPELOAD (constrained_call);
9839 MonoInst *iargs [3];
9843 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9844 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9845 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9846 /* emit_memset only works when val == 0 */
9847 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9852 if (ip [1] == CEE_CPBLK) {
9853 MonoMethod *memcpy_method = get_memcpy_method ();
9854 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9856 MonoMethod *memset_method = get_memset_method ();
9857 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9867 ins_flag |= MONO_INST_NOTYPECHECK;
9869 ins_flag |= MONO_INST_NORANGECHECK;
9870 /* we ignore the no-nullcheck for now since we
9871 * really do it explicitly only when doing callvirt->call
9877 int handler_offset = -1;
9879 for (i = 0; i < header->num_clauses; ++i) {
9880 MonoExceptionClause *clause = &header->clauses [i];
9881 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9882 handler_offset = clause->handler_offset;
9887 bblock->flags |= BB_EXCEPTION_UNSAFE;
9889 g_assert (handler_offset != -1);
9891 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9892 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9893 ins->sreg1 = load->dreg;
9894 MONO_ADD_INS (bblock, ins);
9896 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9897 MONO_ADD_INS (bblock, ins);
9900 link_bblock (cfg, bblock, end_bblock);
9901 start_new_bblock = 1;
9909 CHECK_STACK_OVF (1);
9911 token = read32 (ip + 2);
9912 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9913 MonoType *type = mono_type_create_from_typespec (image, token);
9914 token = mono_type_size (type, &ialign);
9916 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9917 CHECK_TYPELOAD (klass);
9918 mono_class_init (klass);
9919 token = mono_class_value_size (klass, &align);
9921 EMIT_NEW_ICONST (cfg, ins, token);
9926 case CEE_REFANYTYPE: {
9927 MonoInst *src_var, *src;
9933 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9935 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9936 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9955 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9965 g_warning ("opcode 0x%02x not handled", *ip);
9969 if (start_new_bblock != 1)
9972 bblock->cil_length = ip - bblock->cil_code;
9973 bblock->next_bb = end_bblock;
9975 if (cfg->method == method && cfg->domainvar) {
9977 MonoInst *get_domain;
9979 cfg->cbb = init_localsbb;
9981 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9982 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9985 get_domain->dreg = alloc_preg (cfg);
9986 MONO_ADD_INS (cfg->cbb, get_domain);
9988 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9989 MONO_ADD_INS (cfg->cbb, store);
9992 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9993 if (cfg->compile_aot)
9994 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9995 mono_get_got_var (cfg);
9998 if (cfg->method == method && cfg->got_var)
9999 mono_emit_load_got_addr (cfg);
10004 cfg->cbb = init_localsbb;
10006 for (i = 0; i < header->num_locals; ++i) {
10007 MonoType *ptype = header->locals [i];
10008 int t = ptype->type;
10009 dreg = cfg->locals [i]->dreg;
10011 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10012 t = mono_class_enum_basetype (ptype->data.klass)->type;
10013 if (ptype->byref) {
10014 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10015 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10016 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10017 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10018 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10019 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10020 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10021 ins->type = STACK_R8;
10022 ins->inst_p0 = (void*)&r8_0;
10023 ins->dreg = alloc_dreg (cfg, STACK_R8);
10024 MONO_ADD_INS (init_localsbb, ins);
10025 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10026 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10027 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10028 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10030 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10035 if (cfg->init_ref_vars && cfg->method == method) {
10036 /* Emit initialization for ref vars */
10037 // FIXME: Avoid duplication initialization for IL locals.
10038 for (i = 0; i < cfg->num_varinfo; ++i) {
10039 MonoInst *ins = cfg->varinfo [i];
10041 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10042 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10046 /* Add a sequence point for method entry/exit events */
10048 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10049 MONO_ADD_INS (init_localsbb, ins);
10050 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10051 MONO_ADD_INS (cfg->bb_exit, ins);
10056 if (cfg->method == method) {
10057 MonoBasicBlock *bb;
10058 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10059 bb->region = mono_find_block_region (cfg, bb->real_offset);
10061 mono_create_spvar_for_region (cfg, bb->region);
10062 if (cfg->verbose_level > 2)
10063 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10067 g_slist_free (class_inits);
10068 dont_inline = g_list_remove (dont_inline, method);
10070 if (inline_costs < 0) {
10073 /* Method is too large */
10074 mname = mono_method_full_name (method, TRUE);
10075 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10076 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10078 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10079 mono_basic_block_free (original_bb);
10083 if ((cfg->verbose_level > 2) && (cfg->method == method))
10084 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10086 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10087 mono_basic_block_free (original_bb);
10088 return inline_costs;
10091 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10098 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10102 set_exception_type_from_invalid_il (cfg, method, ip);
10106 g_slist_free (class_inits);
10107 mono_basic_block_free (original_bb);
10108 dont_inline = g_list_remove (dont_inline, method);
10109 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10114 store_membase_reg_to_store_membase_imm (int opcode)
10117 case OP_STORE_MEMBASE_REG:
10118 return OP_STORE_MEMBASE_IMM;
10119 case OP_STOREI1_MEMBASE_REG:
10120 return OP_STOREI1_MEMBASE_IMM;
10121 case OP_STOREI2_MEMBASE_REG:
10122 return OP_STOREI2_MEMBASE_IMM;
10123 case OP_STOREI4_MEMBASE_REG:
10124 return OP_STOREI4_MEMBASE_IMM;
10125 case OP_STOREI8_MEMBASE_REG:
10126 return OP_STOREI8_MEMBASE_IMM;
10128 g_assert_not_reached ();
10134 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the _IMM variant of OPCODE, i.e. the form taking an immediate as
 * its second operand instead of a register, or -1 if no such variant exists.
 * NOTE(review): this listing is elided -- the enclosing switch, the matching
 * `case OP_*:' labels for most returns below, and the trailing `return -1;'
 * are missing from the excerpt. Confirm against the full file before editing.
 */
10137 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU/shift opcodes -> their immediate forms */
10141 return OP_IADD_IMM;
10143 return OP_ISUB_IMM;
10145 return OP_IDIV_IMM;
10147 return OP_IDIV_UN_IMM;
10149 return OP_IREM_IMM;
10151 return OP_IREM_UN_IMM;
10153 return OP_IMUL_IMM;
10155 return OP_IAND_IMM;
10159 return OP_IXOR_IMM;
10161 return OP_ISHL_IMM;
10163 return OP_ISHR_IMM;
10165 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU/shift opcodes */
10168 return OP_LADD_IMM;
10170 return OP_LSUB_IMM;
10172 return OP_LAND_IMM;
10176 return OP_LXOR_IMM;
10178 return OP_LSHL_IMM;
10180 return OP_LSHR_IMM;
10182 return OP_LSHR_UN_IMM;
/* Compares against an immediate */
10185 return OP_COMPARE_IMM;
10187 return OP_ICOMPARE_IMM;
10189 return OP_LCOMPARE_IMM;
/* Stores of an immediate to memory */
10191 case OP_STORE_MEMBASE_REG:
10192 return OP_STORE_MEMBASE_IMM;
10193 case OP_STOREI1_MEMBASE_REG:
10194 return OP_STOREI1_MEMBASE_IMM;
10195 case OP_STOREI2_MEMBASE_REG:
10196 return OP_STOREI2_MEMBASE_IMM;
10197 case OP_STOREI4_MEMBASE_REG:
10198 return OP_STOREI4_MEMBASE_IMM;
/* Arch specific opcodes with immediate forms */
10200 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10202 return OP_X86_PUSH_IMM;
10203 case OP_X86_COMPARE_MEMBASE_REG:
10204 return OP_X86_COMPARE_MEMBASE_IMM;
10206 #if defined(TARGET_AMD64)
10207 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10208 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Calls through a register -> direct calls to an immediate address */
10210 case OP_VOIDCALL_REG:
10211 return OP_VOIDCALL;
10219 return OP_LOCALLOC_IMM;
10226 ldind_to_load_membase (int opcode)
10230 return OP_LOADI1_MEMBASE;
10232 return OP_LOADU1_MEMBASE;
10234 return OP_LOADI2_MEMBASE;
10236 return OP_LOADU2_MEMBASE;
10238 return OP_LOADI4_MEMBASE;
10240 return OP_LOADU4_MEMBASE;
10242 return OP_LOAD_MEMBASE;
10243 case CEE_LDIND_REF:
10244 return OP_LOAD_MEMBASE;
10246 return OP_LOADI8_MEMBASE;
10248 return OP_LOADR4_MEMBASE;
10250 return OP_LOADR8_MEMBASE;
10252 g_assert_not_reached ();
10259 stind_to_store_membase (int opcode)
10263 return OP_STOREI1_MEMBASE_REG;
10265 return OP_STOREI2_MEMBASE_REG;
10267 return OP_STOREI4_MEMBASE_REG;
10269 case CEE_STIND_REF:
10270 return OP_STORE_MEMBASE_REG;
10272 return OP_STOREI8_MEMBASE_REG;
10274 return OP_STORER4_MEMBASE_REG;
10276 return OP_STORER8_MEMBASE_REG;
10278 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the _MEM variant of the _MEMBASE load opcode OPCODE, i.e. the
 * form which loads from a constant absolute address instead of
 * basereg + offset. Returns -1 if no such variant exists on this
 * architecture.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* Only valid when registers are pointer sized 64 bit */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch specific read-modify-write opcode which applies OPCODE
 * directly to a memory destination, allowing a load/alu/store sequence to be
 * collapsed into a single instruction, or -1 if no such opcode exists.
 * STORE_OPCODE is the store which would otherwise write the result back.
 * NOTE(review): this listing is elided -- the enclosing switch, the matching
 * `case OP_*:' labels, brace structure and `return -1;' fallbacks are missing
 * from the excerpt. Confirm against the full file before editing.
 */
10311 op_to_op_dest_membase (int store_opcode, int opcode)
10313 #if defined(TARGET_X86)
/* x86: only pointer sized / 32 bit stores can be folded into the alu op */
10314 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* reg -> mem forms */
10319 return OP_X86_ADD_MEMBASE_REG;
10321 return OP_X86_SUB_MEMBASE_REG;
10323 return OP_X86_AND_MEMBASE_REG;
10325 return OP_X86_OR_MEMBASE_REG;
10327 return OP_X86_XOR_MEMBASE_REG;
/* imm -> mem forms */
10330 return OP_X86_ADD_MEMBASE_IMM;
10333 return OP_X86_SUB_MEMBASE_IMM;
10336 return OP_X86_AND_MEMBASE_IMM;
10339 return OP_X86_OR_MEMBASE_IMM;
10342 return OP_X86_XOR_MEMBASE_IMM;
10348 #if defined(TARGET_AMD64)
/* amd64: pointer sized, 32 bit and 64 bit stores can all be folded */
10349 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit reg -> mem forms (shared with x86) */
10354 return OP_X86_ADD_MEMBASE_REG;
10356 return OP_X86_SUB_MEMBASE_REG;
10358 return OP_X86_AND_MEMBASE_REG;
10360 return OP_X86_OR_MEMBASE_REG;
10362 return OP_X86_XOR_MEMBASE_REG;
/* 32 bit imm -> mem forms */
10364 return OP_X86_ADD_MEMBASE_IMM;
10366 return OP_X86_SUB_MEMBASE_IMM;
10368 return OP_X86_AND_MEMBASE_IMM;
10370 return OP_X86_OR_MEMBASE_IMM;
10372 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit reg -> mem forms */
10374 return OP_AMD64_ADD_MEMBASE_REG;
10376 return OP_AMD64_SUB_MEMBASE_REG;
10378 return OP_AMD64_AND_MEMBASE_REG;
10380 return OP_AMD64_OR_MEMBASE_REG;
10382 return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit imm -> mem forms */
10385 return OP_AMD64_ADD_MEMBASE_IMM;
10388 return OP_AMD64_SUB_MEMBASE_IMM;
10391 return OP_AMD64_AND_MEMBASE_IMM;
10394 return OP_AMD64_OR_MEMBASE_IMM;
10397 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return the arch specific opcode which computes OPCODE's boolean result
 * and stores it straight to memory (folding the following store,
 * STORE_OPCODE, into it), or -1 if no such opcode exists. Only byte sized
 * stores of compare results can be folded on x86/amd64 (SETcc writes a
 * single byte).
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the arch specific opcode which folds a memory load (LOAD_OPCODE)
 * into OPCODE as its first source operand, so the separate load instruction
 * can be eliminated, or -1 if that is not possible.
 * NOTE(review): this listing is elided -- the enclosing switch, most
 * `case OP_*:' labels and `return -1;' fallbacks are missing from the
 * excerpt. Confirm against the full file before editing.
 */
10424 op_to_op_src1_membase (int load_opcode, int opcode)
/* x86 section */
10427 /* FIXME: This has sign extension issues */
10429 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10430 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer sized / 32 bit loads can be folded on x86 */
10433 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10438 return OP_X86_PUSH_MEMBASE;
10439 case OP_COMPARE_IMM:
10440 case OP_ICOMPARE_IMM:
10441 return OP_X86_COMPARE_MEMBASE_IMM;
10444 return OP_X86_COMPARE_MEMBASE_REG;
10448 #ifdef TARGET_AMD64
10449 /* FIXME: This has sign extension issues */
10451 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10452 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Push of a pointer sized / 64 bit load */
10457 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10458 return OP_X86_PUSH_MEMBASE;
/* The block below is intentionally commented out in the original source: */
10460 /* FIXME: This only works for 32 bit immediates
10461 case OP_COMPARE_IMM:
10462 case OP_LCOMPARE_IMM:
10463 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10464 return OP_AMD64_COMPARE_MEMBASE_IMM;
10466 case OP_ICOMPARE_IMM:
10467 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10468 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* 64 bit compare against a loaded register */
10472 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10473 return OP_AMD64_COMPARE_MEMBASE_REG;
/* 32 bit compare against a loaded register */
10476 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10477 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return the arch specific opcode which folds a memory load (LOAD_OPCODE)
 * into OPCODE as its second source operand, or -1 if that is not possible.
 * NOTE(review): this listing is elided -- the enclosing switch, the
 * `case OP_*:' labels and `return -1;' fallbacks are missing from the
 * excerpt. Confirm against the full file before editing.
 */
10486 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer sized / 32 bit loads can be folded */
10489 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10495 return OP_X86_COMPARE_REG_MEMBASE;
10497 return OP_X86_ADD_REG_MEMBASE;
10499 return OP_X86_SUB_REG_MEMBASE;
10501 return OP_X86_AND_REG_MEMBASE;
10503 return OP_X86_OR_REG_MEMBASE;
10505 return OP_X86_XOR_REG_MEMBASE;
10509 #ifdef TARGET_AMD64
/* amd64, 32 bit operand width: the 32 bit x86 encodings are reused */
10510 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10513 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10515 return OP_X86_ADD_REG_MEMBASE;
10517 return OP_X86_SUB_REG_MEMBASE;
10519 return OP_X86_AND_REG_MEMBASE;
10521 return OP_X86_OR_REG_MEMBASE;
10523 return OP_X86_XOR_REG_MEMBASE;
/* amd64, 64 bit / pointer sized operand width */
10525 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10529 return OP_AMD64_COMPARE_REG_MEMBASE;
10531 return OP_AMD64_ADD_REG_MEMBASE;
10533 return OP_AMD64_SUB_REG_MEMBASE;
10535 return OP_AMD64_AND_REG_MEMBASE;
10537 return OP_AMD64_OR_REG_MEMBASE;
10539 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but presumably returns -1 for opcodes which
 * are emulated in software on this architecture (long shifts on 32 bit
 * targets without native long shift ops, and mul/div/rem when
 * MONO_ARCH_EMULATE_* is defined), so no immediate variant should be used.
 * NOTE(review): the `case OP_*:'/`return -1;' arms guarded by the #if blocks
 * below are elided from this listing -- confirm against the full file.
 */
10548 mono_op_to_op_imm_noemul (int opcode)
10551 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10557 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything not emulated falls through to the generic immediate mapping */
10565 return mono_op_to_op_imm (opcode);
10569 #ifndef DISABLE_JIT
10572 * mono_handle_global_vregs:
10574 * Make vregs used in more than one bblock 'global', i.e. allocate a variable for them.
/*
 * Pass structure (three phases over the IR):
 *   1) Scan every instruction in every bblock; a vreg seen in two different
 *      bblocks is made 'global' by creating a MonoInst variable for it.
 *   2) Variables used in only a single bblock are converted back into local
 *      vregs (dead variable + direct vreg use), when safe.
 *   3) The varinfo/vars tables are compacted to drop dead entries.
 */
10578 mono_handle_global_vregs (MonoCompile *cfg)
10580 gint32 *vreg_to_bb;
10581 MonoBasicBlock *bb;
/* Map vreg -> (block_num + 1) of the single bblock using it, or -1 if used in several.
 * NOTE(review): element size is sizeof (gint32*) but the array holds gint32 --
 * presumably a typo for sizeof (gint32); harmless over-allocation on 64 bit. Confirm. */
10584 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10586 #ifdef MONO_ARCH_SIMD_INTRINSICS
10587 if (cfg->uses_simd_intrinsics)
10588 mono_simd_simplify_indirection (cfg);
10591 /* Find local vregs used in more than one bb */
10592 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10593 MonoInst *ins = bb->code;
10594 int block_num = bb->block_num;
10596 if (cfg->verbose_level > 2)
10597 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10600 for (; ins; ins = ins->next) {
10601 const char *spec = INS_INFO (ins->opcode);
10602 int regtype = 0, regindex;
10605 if (G_UNLIKELY (cfg->verbose_level > 2))
10606 mono_print_ins (ins);
/* All CIL level opcodes must have been lowered to machine IR by now */
10608 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
10610 for (regindex = 0; regindex < 4; regindex ++) {
10613 if (regindex == 0) {
10614 regtype = spec [MONO_INST_DEST];
10615 if (regtype == ' ')
10618 } else if (regindex == 1) {
10619 regtype = spec [MONO_INST_SRC1];
10620 if (regtype == ' ')
10623 } else if (regindex == 2) {
10624 regtype = spec [MONO_INST_SRC2];
10625 if (regtype == ' ')
10628 } else if (regindex == 3) {
10629 regtype = spec [MONO_INST_SRC3];
10630 if (regtype == ' ')
10635 #if SIZEOF_REGISTER == 4
10636 /* In the LLVM case, the long opcodes are not decomposed */
10637 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
* to determine when it needs to be global. So be conservative.
10643 if (!get_vreg_to_inst (cfg, vreg)) {
10644 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10646 if (cfg->verbose_level > 2)
10647 printf ("LONG VREG R%d made global.\n", vreg);
* Make the component vregs volatile since the optimizations can
* get confused otherwise.
10654 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10655 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10659 g_assert (vreg != -1);
10661 prev_bb = vreg_to_bb [vreg];
10662 if (prev_bb == 0) {
10663 /* 0 is a valid block num */
10664 vreg_to_bb [vreg] = block_num + 1;
10665 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never made global */
10666 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10669 if (!get_vreg_to_inst (cfg, vreg)) {
10670 if (G_UNLIKELY (cfg->verbose_level > 2))
10671 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with the type matching the regtype ('i'/'l'/'f'/vtype) */
10675 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10678 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10681 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10684 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10687 g_assert_not_reached ();
10691 /* Flag as having been used in more than one bb */
10692 vreg_to_bb [vreg] = -1;
10698 /* If a variable is used in only one bblock, convert it into a local vreg */
10699 for (i = 0; i < cfg->num_varinfo; i++) {
10700 MonoInst *var = cfg->varinfo [i];
10701 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10703 switch (var->type) {
10709 #if SIZEOF_REGISTER == 8
10712 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10713 /* Enabling this screws up the fp stack on x86 */
10716 /* Arguments are implicitly global */
10717 /* Putting R4 vars into registers doesn't work currently */
10718 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
* Make sure that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
10724 /* This is too slow for JIT compilation */
10726 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10728 int def_index, call_index, ins_index;
10729 gboolean spilled = FALSE;
10734 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10735 const char *spec = INS_INFO (ins->opcode);
10737 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10738 def_index = ins_index;
/* NOTE(review): the second clause below duplicates the SRC1/sreg1 test;
 * it presumably should check MONO_INST_SRC2/ins->sreg2 -- confirm and fix. */
10740 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10741 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10742 if (call_index > def_index) {
10748 if (MONO_IS_CALL (ins))
10749 call_index = ins_index;
/* Kill the variable: later passes use the raw vreg directly */
10759 if (G_UNLIKELY (cfg->verbose_level > 2))
10760 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10761 var->flags |= MONO_INST_IS_DEAD;
10762 cfg->vreg_to_inst [var->dreg] = NULL;
* Compress the varinfo and vars tables so the liveness computation is faster and
* takes up less space.
10773 for (i = 0; i < cfg->num_varinfo; ++i) {
10774 MonoInst *var = cfg->varinfo [i];
10775 if (pos < i && cfg->locals_start == i)
10776 cfg->locals_start = pos;
10777 if (!(var->flags & MONO_INST_IS_DEAD)) {
10779 cfg->varinfo [pos] = cfg->varinfo [i];
10780 cfg->varinfo [pos]->inst_c0 = pos;
10781 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10782 cfg->vars [pos].idx = pos;
10783 #if SIZEOF_REGISTER == 4
10784 if (cfg->varinfo [pos]->type == STACK_I8) {
10785 /* Modify the two component vars too */
10788 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10789 var1->inst_c0 = pos;
10790 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10791 var1->inst_c0 = pos;
10798 cfg->num_varinfo = pos;
/* Keep locals_start within bounds after compaction */
10799 if (cfg->locals_start > cfg->num_varinfo)
10800 cfg->locals_start = cfg->num_varinfo;
10804 * mono_spill_global_vars:
10806 * Generate spill code for variables which are not allocated to registers,
10807 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10808 * code is generated which could be optimized by the local optimization passes.
10811 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10813 MonoBasicBlock *bb;
10815 int orig_next_vreg;
10816 guint32 *vreg_to_lvreg;
10818 guint32 i, lvregs_len;
10819 gboolean dest_has_lvreg = FALSE;
10820 guint32 stacktypes [128];
10821 MonoInst **live_range_start, **live_range_end;
10822 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10824 *need_local_opts = FALSE;
10826 memset (spec2, 0, sizeof (spec2));
10828 /* FIXME: Move this function to mini.c */
10829 stacktypes ['i'] = STACK_PTR;
10830 stacktypes ['l'] = STACK_I8;
10831 stacktypes ['f'] = STACK_R8;
10832 #ifdef MONO_ARCH_SIMD_INTRINSICS
10833 stacktypes ['x'] = STACK_VTYPE;
10836 #if SIZEOF_REGISTER == 4
10837 /* Create MonoInsts for longs */
10838 for (i = 0; i < cfg->num_varinfo; i++) {
10839 MonoInst *ins = cfg->varinfo [i];
10841 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10842 switch (ins->type) {
10847 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10850 g_assert (ins->opcode == OP_REGOFFSET);
10852 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10854 tree->opcode = OP_REGOFFSET;
10855 tree->inst_basereg = ins->inst_basereg;
10856 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10858 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10860 tree->opcode = OP_REGOFFSET;
10861 tree->inst_basereg = ins->inst_basereg;
10862 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10872 /* FIXME: widening and truncation */
10875 * As an optimization, when a variable allocated to the stack is first loaded into
10876 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10877 * the variable again.
10879 orig_next_vreg = cfg->next_vreg;
10880 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10881 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10885 * These arrays contain the first and last instructions accessing a given
10887 * Since we emit bblocks in the same order we process them here, and we
10888 * don't split live ranges, these will precisely describe the live range of
10889 * the variable, i.e. the instruction range where a valid value can be found
10890 * in the variables location.
10891 * The live range is computed using the liveness info computed by the liveness pass.
10892 * We can't use vmv->range, since that is an abstract live range, and we need
10893 * one which is instruction precise.
10894 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10896 /* FIXME: Only do this if debugging info is requested */
10897 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10898 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10899 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10900 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10902 /* Add spill loads/stores */
10903 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10906 if (cfg->verbose_level > 2)
10907 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10909 /* Clear vreg_to_lvreg array */
10910 for (i = 0; i < lvregs_len; i++)
10911 vreg_to_lvreg [lvregs [i]] = 0;
10915 MONO_BB_FOR_EACH_INS (bb, ins) {
10916 const char *spec = INS_INFO (ins->opcode);
10917 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10918 gboolean store, no_lvreg;
10919 int sregs [MONO_MAX_SRC_REGS];
10921 if (G_UNLIKELY (cfg->verbose_level > 2))
10922 mono_print_ins (ins);
10924 if (ins->opcode == OP_NOP)
10928 * We handle LDADDR here as well, since it can only be decomposed
10929 * when variable addresses are known.
10931 if (ins->opcode == OP_LDADDR) {
10932 MonoInst *var = ins->inst_p0;
10934 if (var->opcode == OP_VTARG_ADDR) {
10935 /* Happens on SPARC/S390 where vtypes are passed by reference */
10936 MonoInst *vtaddr = var->inst_left;
10937 if (vtaddr->opcode == OP_REGVAR) {
10938 ins->opcode = OP_MOVE;
10939 ins->sreg1 = vtaddr->dreg;
10941 else if (var->inst_left->opcode == OP_REGOFFSET) {
10942 ins->opcode = OP_LOAD_MEMBASE;
10943 ins->inst_basereg = vtaddr->inst_basereg;
10944 ins->inst_offset = vtaddr->inst_offset;
10948 g_assert (var->opcode == OP_REGOFFSET);
10950 ins->opcode = OP_ADD_IMM;
10951 ins->sreg1 = var->inst_basereg;
10952 ins->inst_imm = var->inst_offset;
10955 *need_local_opts = TRUE;
10956 spec = INS_INFO (ins->opcode);
10959 if (ins->opcode < MONO_CEE_LAST) {
10960 mono_print_ins (ins);
10961 g_assert_not_reached ();
10965 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10969 if (MONO_IS_STORE_MEMBASE (ins)) {
10970 tmp_reg = ins->dreg;
10971 ins->dreg = ins->sreg2;
10972 ins->sreg2 = tmp_reg;
10975 spec2 [MONO_INST_DEST] = ' ';
10976 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10977 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10978 spec2 [MONO_INST_SRC3] = ' ';
10980 } else if (MONO_IS_STORE_MEMINDEX (ins))
10981 g_assert_not_reached ();
10986 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10987 printf ("\t %.3s %d", spec, ins->dreg);
10988 num_sregs = mono_inst_get_src_registers (ins, sregs);
10989 for (srcindex = 0; srcindex < 3; ++srcindex)
10990 printf (" %d", sregs [srcindex]);
10997 regtype = spec [MONO_INST_DEST];
10998 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11001 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11002 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11003 MonoInst *store_ins;
11005 MonoInst *def_ins = ins;
11006 int dreg = ins->dreg; /* The original vreg */
11008 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11010 if (var->opcode == OP_REGVAR) {
11011 ins->dreg = var->dreg;
11012 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11014 * Instead of emitting a load+store, use a _membase opcode.
11016 g_assert (var->opcode == OP_REGOFFSET);
11017 if (ins->opcode == OP_MOVE) {
11021 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11022 ins->inst_basereg = var->inst_basereg;
11023 ins->inst_offset = var->inst_offset;
11026 spec = INS_INFO (ins->opcode);
11030 g_assert (var->opcode == OP_REGOFFSET);
11032 prev_dreg = ins->dreg;
11034 /* Invalidate any previous lvreg for this vreg */
11035 vreg_to_lvreg [ins->dreg] = 0;
11039 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11041 store_opcode = OP_STOREI8_MEMBASE_REG;
11044 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11046 if (regtype == 'l') {
11047 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11048 mono_bblock_insert_after_ins (bb, ins, store_ins);
11049 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11050 mono_bblock_insert_after_ins (bb, ins, store_ins);
11051 def_ins = store_ins;
11054 g_assert (store_opcode != OP_STOREV_MEMBASE);
11056 /* Try to fuse the store into the instruction itself */
11057 /* FIXME: Add more instructions */
11058 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11059 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11060 ins->inst_imm = ins->inst_c0;
11061 ins->inst_destbasereg = var->inst_basereg;
11062 ins->inst_offset = var->inst_offset;
11063 spec = INS_INFO (ins->opcode);
11064 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11065 ins->opcode = store_opcode;
11066 ins->inst_destbasereg = var->inst_basereg;
11067 ins->inst_offset = var->inst_offset;
11071 tmp_reg = ins->dreg;
11072 ins->dreg = ins->sreg2;
11073 ins->sreg2 = tmp_reg;
11076 spec2 [MONO_INST_DEST] = ' ';
11077 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11078 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11079 spec2 [MONO_INST_SRC3] = ' ';
11081 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11082 // FIXME: The backends expect the base reg to be in inst_basereg
11083 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11085 ins->inst_basereg = var->inst_basereg;
11086 ins->inst_offset = var->inst_offset;
11087 spec = INS_INFO (ins->opcode);
11089 /* printf ("INS: "); mono_print_ins (ins); */
11090 /* Create a store instruction */
11091 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11093 /* Insert it after the instruction */
11094 mono_bblock_insert_after_ins (bb, ins, store_ins);
11096 def_ins = store_ins;
11099 * We can't assign ins->dreg to var->dreg here, since the
11100 * sregs could use it. So set a flag, and do it after
11103 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11104 dest_has_lvreg = TRUE;
11109 if (def_ins && !live_range_start [dreg]) {
11110 live_range_start [dreg] = def_ins;
11111 live_range_start_bb [dreg] = bb;
11118 num_sregs = mono_inst_get_src_registers (ins, sregs);
11119 for (srcindex = 0; srcindex < 3; ++srcindex) {
11120 regtype = spec [MONO_INST_SRC1 + srcindex];
11121 sreg = sregs [srcindex];
11123 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11124 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11125 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11126 MonoInst *use_ins = ins;
11127 MonoInst *load_ins;
11128 guint32 load_opcode;
11130 if (var->opcode == OP_REGVAR) {
11131 sregs [srcindex] = var->dreg;
11132 //mono_inst_set_src_registers (ins, sregs);
11133 live_range_end [sreg] = use_ins;
11134 live_range_end_bb [sreg] = bb;
11138 g_assert (var->opcode == OP_REGOFFSET);
11140 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11142 g_assert (load_opcode != OP_LOADV_MEMBASE);
11144 if (vreg_to_lvreg [sreg]) {
11145 g_assert (vreg_to_lvreg [sreg] != -1);
11147 /* The variable is already loaded to an lvreg */
11148 if (G_UNLIKELY (cfg->verbose_level > 2))
11149 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11150 sregs [srcindex] = vreg_to_lvreg [sreg];
11151 //mono_inst_set_src_registers (ins, sregs);
11155 /* Try to fuse the load into the instruction */
11156 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11157 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11158 sregs [0] = var->inst_basereg;
11159 //mono_inst_set_src_registers (ins, sregs);
11160 ins->inst_offset = var->inst_offset;
11161 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11162 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11163 sregs [1] = var->inst_basereg;
11164 //mono_inst_set_src_registers (ins, sregs);
11165 ins->inst_offset = var->inst_offset;
11167 if (MONO_IS_REAL_MOVE (ins)) {
11168 ins->opcode = OP_NOP;
11171 //printf ("%d ", srcindex); mono_print_ins (ins);
11173 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11175 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11176 if (var->dreg == prev_dreg) {
11178 * sreg refers to the value loaded by the load
11179 * emitted below, but we need to use ins->dreg
11180 * since it refers to the store emitted earlier.
11184 g_assert (sreg != -1);
11185 vreg_to_lvreg [var->dreg] = sreg;
11186 g_assert (lvregs_len < 1024);
11187 lvregs [lvregs_len ++] = var->dreg;
11191 sregs [srcindex] = sreg;
11192 //mono_inst_set_src_registers (ins, sregs);
11194 if (regtype == 'l') {
11195 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11196 mono_bblock_insert_before_ins (bb, ins, load_ins);
11197 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11198 mono_bblock_insert_before_ins (bb, ins, load_ins);
11199 use_ins = load_ins;
11202 #if SIZEOF_REGISTER == 4
11203 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11205 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11206 mono_bblock_insert_before_ins (bb, ins, load_ins);
11207 use_ins = load_ins;
11211 if (var->dreg < orig_next_vreg) {
11212 live_range_end [var->dreg] = use_ins;
11213 live_range_end_bb [var->dreg] = bb;
11217 mono_inst_set_src_registers (ins, sregs);
11219 if (dest_has_lvreg) {
11220 g_assert (ins->dreg != -1);
11221 vreg_to_lvreg [prev_dreg] = ins->dreg;
11222 g_assert (lvregs_len < 1024);
11223 lvregs [lvregs_len ++] = prev_dreg;
11224 dest_has_lvreg = FALSE;
11228 tmp_reg = ins->dreg;
11229 ins->dreg = ins->sreg2;
11230 ins->sreg2 = tmp_reg;
11233 if (MONO_IS_CALL (ins)) {
11234 /* Clear vreg_to_lvreg array */
11235 for (i = 0; i < lvregs_len; i++)
11236 vreg_to_lvreg [lvregs [i]] = 0;
11238 } else if (ins->opcode == OP_NOP) {
11240 MONO_INST_NULLIFY_SREGS (ins);
11243 if (cfg->verbose_level > 2)
11244 mono_print_ins_index (1, ins);
11247 /* Extend the live range based on the liveness info */
11248 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11249 for (i = 0; i < cfg->num_varinfo; i ++) {
11250 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11252 if (vreg_is_volatile (cfg, vi->vreg))
11253 /* The liveness info is incomplete */
11256 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11257 /* Live from at least the first ins of this bb */
11258 live_range_start [vi->vreg] = bb->code;
11259 live_range_start_bb [vi->vreg] = bb;
11262 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11263 /* Live at least until the last ins of this bb */
11264 live_range_end [vi->vreg] = bb->last_ins;
11265 live_range_end_bb [vi->vreg] = bb;
11271 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11273 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11274 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11276 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11277 for (i = 0; i < cfg->num_varinfo; ++i) {
11278 int vreg = MONO_VARINFO (cfg, i)->vreg;
11281 if (live_range_start [vreg]) {
11282 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11284 ins->inst_c1 = vreg;
11285 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11287 if (live_range_end [vreg]) {
11288 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11290 ins->inst_c1 = vreg;
11291 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11292 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11294 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11300 g_free (live_range_start);
11301 g_free (live_range_end);
11302 g_free (live_range_start_bb);
11303 g_free (live_range_end_bb);
11308 * - use 'iadd' instead of 'int_add'
11309 * - handling ovf opcodes: decompose in method_to_ir.
11310 * - unify iregs/fregs
11311 * -> partly done, the missing parts are:
11312 * - a more complete unification would involve unifying the hregs as well, so
11313 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11314 * would no longer map to the machine hregs, so the code generators would need to
11315 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11316 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11317 * fp/non-fp branches speeds it up by about 15%.
11318 * - use sext/zext opcodes instead of shifts
11320 * - get rid of TEMPLOADs if possible and use vregs instead
11321 * - clean up usage of OP_P/OP_ opcodes
11322 * - cleanup usage of DUMMY_USE
11323 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11325 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11326 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11327 * - make sure handle_stack_args () is called before the branch is emitted
11328 * - when the new IR is done, get rid of all unused stuff
11329 * - COMPARE/BEQ as separate instructions or unify them ?
11330 * - keeping them separate allows specialized compare instructions like
11331 * compare_imm, compare_membase
11332 * - most back ends unify fp compare+branch, fp compare+ceq
11333 * - integrate mono_save_args into inline_method
11334 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11335 * - handle long shift opts on 32 bit platforms somehow: they require
11336 * 3 sregs (2 for arg1 and 1 for arg2)
11337 * - make byref a 'normal' type.
11338 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11339 * variable if needed.
11340 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11341 * like inline_method.
11342 * - remove inlining restrictions
11343 * - fix LNEG and enable cfold of INEG
11344 * - generalize x86 optimizations like ldelema as a peephole optimization
11345 * - add store_mem_imm for amd64
11346 * - optimize the loading of the interruption flag in the managed->native wrappers
11347 * - avoid special handling of OP_NOP in passes
11348 * - move code inserting instructions into one function/macro.
11349 * - try a coalescing phase after liveness analysis
11350 * - add float -> vreg conversion + local optimizations on !x86
11351 * - figure out how to handle decomposed branches during optimizations, ie.
11352 * compare+branch, op_jump_table+op_br etc.
11353 * - promote RuntimeXHandles to vregs
11354 * - vtype cleanups:
11355 * - add a NEW_VARLOADA_VREG macro
11356 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11357 * accessing vtype fields.
11358 * - get rid of I8CONST on 64 bit platforms
11359 * - dealing with the increase in code size due to branches created during opcode
11361 * - use extended basic blocks
11362 * - all parts of the JIT
11363 * - handle_global_vregs () && local regalloc
11364 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11365 * - sources of increase in code size:
11368 * - isinst and castclass
11369 * - lvregs not allocated to global registers even if used multiple times
11370 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11372 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11373 * - add all micro optimizations from the old JIT
11374 * - put tree optimizations into the deadce pass
11375 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11376 * specific function.
11377 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11378 * fcompare + branchCC.
11379 * - create a helper function for allocating a stack slot, taking into account
11380 * MONO_CFG_HAS_SPILLUP.
11382 * - merge the ia64 switch changes.
11383 * - optimize mono_regstate2_alloc_int/float.
11384 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11385 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11386 * parts of the tree could be separated by other instructions, killing the tree
11387 * arguments, or stores killing loads etc. Also, should we fold loads into other
11388 * instructions if the result of the load is used multiple times ?
11389 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11390 * - LAST MERGE: 108395.
11391 * - when returning vtypes in registers, generate IR and append it to the end of the
11392 * last bb instead of doing it in the epilog.
11393 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11401 - When to decompose opcodes:
11402 - earlier: this makes some optimizations hard to implement, since the low level IR
11403 no longer contains the necessary information. But it is easier to do.
11404 - later: harder to implement, enables more optimizations.
11405 - Branches inside bblocks:
11406 - created when decomposing complex opcodes.
11407 - branches to another bblock: harmless, but not tracked by the branch
11408 optimizations, so need to branch to a label at the start of the bblock.
11409 - branches to inside the same bblock: very problematic, trips up the local
11410 reg allocator. Can be fixed by splitting the current bblock, but that is a
11411 complex operation, since some local vregs can become global vregs etc.
11412 - Local/global vregs:
11413 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11414 local register allocator.
11415 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11416 structure, created by mono_create_var (). Assigned to hregs or the stack by
11417 the global register allocator.
11418 - When to do optimizations like alu->alu_imm:
11419 - earlier -> saves work later on since the IR will be smaller/simpler
11420 - later -> can work on more instructions
11421 - Handling of valuetypes:
11422 - When a vtype is pushed on the stack, a new temporary is created, an
11423 instruction computing its address (LDADDR) is emitted and pushed on
11424 the stack. Need to optimize cases when the vtype is used immediately as in
11425 argument passing, stloc etc.
11426 - Instead of the to_end stuff in the old JIT, simply call the function handling
11427 the values on the stack before emitting the last instruction of the bb.
11430 #endif /* DISABLE_JIT */