/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 * Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * Emits IR loading into @intf_reg the interface-offsets entry for @klass.
 * The table is laid out at negative offsets from the vtable pointer held
 * in @vtable_reg (entry for id i lives at -(i + 1) * SIZEOF_VOID_P).
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: the interface id is not a compile-time constant, so emit a
 * patchable "adjusted IID" constant and compute the slot address at
 * runtime by adding it to the vtable pointer. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is known now, so fold the (negative) slot offset directly
 * into a single load. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emits IR that sets @intf_bit_reg to a nonzero value iff the interface
 * bitmap located at @base_reg + @offset has the bit for
 * klass->interface_id set.  Three strategies, chosen at build/compile
 * time: compressed bitmap (icall), AOT (runtime bit computation), and
 * plain JIT (constant byte index + mask).
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: the test cannot be done with simple loads/masks,
 * so call the mono_class_interface_match icall with the bitmap pointer
 * and the interface id. */
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time.  Emit a patchable
 * IID constant, then compute byte index (iid >> 3) and bit mask
 * (1 << (iid & 7)) at runtime and AND them together. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoClass. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoVTable. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is not greater
1439 * than the value given by max_iid_reg.
/*
 * On failure (max_iid < iid): branches to @false_target when one is given,
 * otherwise throws InvalidCastException.
 */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: the interface id must come from a patchable constant. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then delegate. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then delegate. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "is instance" test using the supertypes table of the class in
 * @klass_reg: loads supertypes [klass->idepth - 1] and compares it against
 * @klass (or against @klass_ins->dreg when the class is only known at
 * runtime).  Branches to @true_target on a match; the runtime idepth guard
 * branches to @false_target when the supertypes table is too shallow.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than MONO_DEFAULT_SUPERTABLE_SIZE need a runtime
 * idepth check; shallower entries are always present in the table. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime class inst, an AOT class constant, or the
 * raw MonoClass pointer, in that order of availability. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: same as mini_emit_isninst_cast_inst with the class
 * known at compile time (no runtime class instruction). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast test against the vtable in @vtable_reg: first
 * checks that klass->interface_id is within the vtable's max interface id,
 * then tests the interface bitmap bit.  Nonzero bit branches to
 * @true_target when given; otherwise a zero bit throws
 * InvalidCastException.
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as mini_emit_iface_cast, reading max_interface_id and the
 * interface bitmap from the MonoClass instead of the MonoVTable. */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class-equality check: compares the class pointer in
 * @klass_reg against @klass (or @klass_inst->dreg when the class is only
 * known at runtime) and throws InvalidCastException on mismatch.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
/* AOT: the MonoClass pointer cannot be embedded as an immediate. */
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time-known class. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares the class pointer in @klass_reg against @klass and branches to
 * @target using @branch_op (e.g. OP_PBEQ / OP_PBNE_UN), instead of
 * throwing like mini_emit_class_check.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a castclass check for the object in @obj_reg whose class pointer is
 * in @klass_reg.  Handles array casts (rank + element-class checks, with
 * special cases for enum element types and interfaces) and falls back to a
 * supertypes-table check for ordinary classes.  @object_is_null is the
 * bblock to branch to when the cast trivially succeeds.  Throws
 * InvalidCastException on failure.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* Array path (guard condition elided in this view): klass_inst is not
 * supported here — runtime-only array classes take a different route. */
1587 g_assert (!klass_inst);
/* The array ranks must match exactly. */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: enums and their base types need care because an
 * enum[] and its underlying-type[] are cast-compatible. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
/* SZARRAY (vector) targets require a NULL bounds pointer. */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: check supertypes [klass->idepth - 1] == klass, with a
 * runtime idepth guard for deep hierarchies (cf. isninst variant above). */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time-known class only. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits an unrolled memset of @size bytes at @destreg + @offset.
 * Small aligned sizes use immediate stores; larger sizes load @val into a
 * register and emit a sequence of register stores, widest-first, honoring
 * @align.  Only val == 0 is supported (asserted).
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a single immediate store covers the whole region when size
 * fits in one aligned machine store. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value once, then store repeatedly. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned targets fall back to byte stores.
 * This could be optimized further if necessary. */
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* On targets that tolerate unaligned access, prefer the widest stores:
 * 8-byte (64-bit only), then 4-, 2- and 1-byte tails. */
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits an unrolled memcpy of @size bytes from @srcreg + @soffset to
 * @destreg + @doffset, honoring @align.  Each step loads into a fresh
 * temp register and stores it, widest-width-first where unaligned access
 * is allowed, falling back to byte copies otherwise.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Unaligned fallback: copy one byte at a time.
 * This could be optimized further if necessary. */
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Widest-first copy loops: 8-byte chunks on 64-bit, then 4/2/1-byte
 * tails until size is exhausted. */
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a method return type to the matching call opcode family:
 * plain (OP_CALL*), long (OP_LCALL*), float (OP_FCALL*), vtype
 * (OP_VCALL*) or void (OP_VOIDCALL*), each in the _REG (indirect, @calli)
 * or VIRT (@virt) variant.  Enums and generic instantiations are unwrapped
 * and the switch is retried on the underlying type.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: use the plain call family. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type parameters to their basic type first. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Retry on the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Byref targets accept managed pointers of the matching class, or raw
 * pointers (STACK_PTR). */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. and dispatch on the underlying type; each
 * case checks that the stack slot kind (STACK_I4/I8/R8/OBJ/VTYPE/...)
 * matches what the target type requires. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Value types additionally require the exact same MonoClass. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
/* Generic instantiations: value types get the VTYPE + exact-class
 * check, reference instantiations just need an object ref. */
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
/* Shared generics only: type variables are treated as references. */
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* 'this' (args [0], when present) must be an object ref, managed
 * pointer, or raw pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each declared parameter against the stack slot kind of the
 * corresponding argument. */
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enum parameters: retry the switch on the underlying type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
/* Generic instantiations: retry on the container's open type. */
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps an OP_*CALLVIRT opcode to the matching direct-call opcode; aborts on
 * anything that is not a virtual-call opcode. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps an OP_*CALLVIRT opcode to the matching *CALL_MEMBASE opcode (call
 * through a loaded vtable/delegate slot); aborts on non-virtual opcodes. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT (interface method table) discriminator for @call: either
 * the caller-supplied @imt_arg or a constant for call->method.  On
 * architectures with a dedicated IMT register the value is bound to
 * MONO_ARCH_IMT_REG; otherwise the arch back end decides.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg = alloc_preg (cfg);
2091 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2092 } else if (cfg->compile_aot) {
/* AOT: the MonoMethod pointer must be a patchable constant. */
2093 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2096 MONO_INST_NEW (cfg, ins, OP_PCONST);
2097 ins->inst_p0 = call->method;
2098 ins->dreg = method_reg;
2099 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM tracks the register itself; the JIT back end registers it as a
 * fixed out-argument. */
2103 if (COMPILE_LLVM (cfg))
2104 call->imt_arg_reg = method_reg;
2106 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2108 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2113 static MonoJumpInfo *
/* Allocates a MonoJumpInfo from @mp and fills in ip/type/target.
 * The mempool owns the memory; callers must not free it. */
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2120 ji->data.target = target;
2125 inline static MonoCallInst *
/*
 * Creates (but does not add to the bblock) a MonoCallInst for a call with
 * signature @sig and arguments @args.  @calli selects an indirect call,
 * @virtual a virtual one, @tail a tail call.  Also sets up the return
 * value: a vret variable / OP_OUTARG_VTRETADDR for struct returns, or a
 * fresh dreg otherwise, and lets the back end emit the out-arguments.
 */
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: either reuse cfg->vret_addr (first branch; condition
 * elided here) or allocate a temp and reference it indirectly. */
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2185 MonoInst *in = call->args [i];
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
/* Hand the call to the LLVM or native back end to place the
 * out-arguments; both paths record stack usage below. */
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2211 mono_arch_emit_call (cfg, call);
2213 mono_arch_emit_call (cfg, call);
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
2222 inline static MonoInst*
/* Emits an indirect call through the function pointer in @addr->dreg and
 * appends it to the current bblock.  Returns the call instruction. */
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/* Attaches the runtime-generic-context argument (in @rgctx_reg) to @call.
 * With a dedicated MONO_ARCH_RGCTX_REG it is bound as a fixed out-register;
 * otherwise the register number is stored for the back end (LLVM path). */
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2242 call->rgctx_arg_reg = rgctx_reg;
2249 inline static MonoInst*
/* Indirect call variant that also passes a runtime generic context.
 * The rgctx value is copied into a fresh preg before the call is emitted
 * so that emitting the call's out-args cannot clobber it. */
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Central helper for emitting a managed method call.  Decides between:
 *   - calli through an rgctx-resolved address (possible remoting target
 *     inside shared generic code),
 *   - a delegate Invoke fast path through delegate->invoke_impl,
 *   - a direct call (non-virtual, or virtual-but-final methods),
 *   - a virtual call through the vtable, using the IMT slot for
 *     interface methods when MONO_ARCH_HAVE_IMT is available.
 * @this is NULL for static calls; @imt_arg optionally carries an explicit
 * IMT discriminator.  Returns the emitted call instruction.
 */
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
/* String ctors formally return void; patch the signature to return
 * string so the result register is typed correctly. */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: a non-virtual call on a MarshalByRef (or object) 'this'
 * might cross an appdomain boundary and needs a check wrapper. */
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
2297 g_assert (cfg->generic_sharing_context);
/* Shared generic code can't use a wrapper method directly; fetch the
 * remoting-invoke-with-check address from the rgctx and calli. */
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call straight through the precomputed
 * invoke_impl slot on the delegate object. */
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods that are not
 * remoting wrappers, can be called directly after a null check. */
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with an implicit null-check
 * fault) and call through the appropriate slot. */
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2379 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: the slot lives at a negative offset from
 * the vtable, indexed by the method's IMT slot. */
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2387 if (slot_reg == -1) {
/* No IMT: resolve the interface offset from the table before the
 * vtable and index by the method's vtable slot. */
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: direct vtable slot. */
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT argument. */
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/* Method-call variant that also passes an rgctx vtable argument.  The
 * value is copied to a fresh preg before the call is emitted so that
 * out-argument emission cannot clobber it. */
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: emits a call using the method's own signature and
 * no explicit IMT argument. */
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native function @func with signature @sig and
 * appends it to the current bblock.  Returns the call instruction. */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Emits a call to the registered JIT icall whose address is @func, looking
 * up its signature and wrapper from the icall registry. */
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch info is registered in cfg->abs_patches so that the address
 * can be resolved when the PATCH_INFO_ABS is processed. */
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes don't treat it as a real
 * function address. */
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widens the result of a call when needed: native (pinvoke) or
 * LLVM-compiled callees may return sub-register-sized integers without
 * initializing the upper bits, so emit the matching sign/zero extension
 * over the call's dreg.  Returns the (possibly replaced) result ins.
 */
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
/* Select the widening conversion from the load opcode the return
 * type would use; -1 (presumably the default case, elided here)
 * means no widening is required. */
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return the managed String.memcpy(dst, src, n) helper from corlib,
 * caching it in a static on first use. Aborts if corlib is too old to
 * provide it.
 */
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Compute into *WB_BITMAP a bitmask with one bit per pointer-sized slot of
 * KLASS (starting at byte OFFSET), set wherever the slot holds an object
 * reference. Recurses into embedded value types that contain references.
 * Used to decide which stores need a GC write barrier.
 */
2545 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2547 MonoClassField *field;
2548 gpointer iter = NULL;
2550 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
2553 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) MonoObject header; strip it. */
2555 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2556 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned for the per-slot bitmap to be valid. */
2557 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2558 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2560 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2561 MonoClass *field_class = mono_class_from_mono_type (field->type);
2562 if (field_class->has_references)
2563 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 * Emit an inline, write-barrier-aware copy of SIZE bytes of the valuetype
 * KLASS from iargs[1] to iargs[0]. Small copies are unrolled word by word,
 * emitting a GC write barrier after each store of a reference slot (as
 * recorded by create_write_barrier_bitmap). Larger copies fall back to the
 * mono_gc_wbarrier_value_copy_bitmap icall. Returns whether the copy was
 * emitted (the elided early-return paths presumably report failure so the
 * caller can fall back — TODO confirm against the full source).
 */
2569 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2571 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2572 unsigned need_wb = 0;
2577 /*types with references can't have alignment smaller than sizeof(void*) */
2578 if (align < SIZEOF_VOID_P)
2581 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2582 if (size > 32 * SIZEOF_VOID_P)
2585 create_write_barrier_bitmap (klass, &need_wb, 0);
2587 /* We don't unroll more than 5 stores to avoid code bloat. */
2588 if (size > 5 * SIZEOF_VOID_P) {
2589 /*FIXME this is a temporary fix while issues with valuetypes are solved.*/
2590 #if SIZEOF_VOID_P == 8
2593 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer slots. */
2594 size += (SIZEOF_VOID_P - 1);
2595 size &= ~(SIZEOF_VOID_P - 1);
2597 EMIT_NEW_ICONST (cfg, iargs [2], size);
2598 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2599 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2603 destreg = iargs [0]->dreg;
2604 srcreg = iargs [1]->dreg;
2607 dest_ptr_reg = alloc_preg (cfg);
2608 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks forward through the destination; iargs[0] is reused as the wb argument. */
2611 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2613 while (size >= SIZEOF_VOID_P) {
2614 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Lowest bit of need_wb corresponds to the slot just stored. */
2617 if (need_wb & 0x1) {
2618 MonoInst *dummy_use;
2620 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2621 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg alive across the barrier call. */
2623 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2624 dummy_use->sreg1 = dest_ptr_reg;
2625 MONO_ADD_INS (cfg->cbb, dummy_use);
2629 offset += SIZEOF_VOID_P;
2630 size -= SIZEOF_VOID_P;
2633 /*tmp += sizeof (void*)*/
2634 if (size >= SIZEOF_VOID_P) {
2635 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2636 MONO_ADD_INS (cfg->cbb, iargs [0]);
2640 /* Those cannot be references since size < sizeof (void*) */
/* Tail: copy the remaining <pointer-size bytes with progressively smaller loads/stores. */
2642 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2649 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2666 * Emit code to copy a valuetype of type @klass whose address is stored in
2667 * @src->dreg to memory whose address is stored at @dest->dreg.
2670 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2672 MonoInst *iargs [4];
2675 MonoMethod *memcpy_method;
2679 * This check breaks with spilled vars... need to handle it during verification anyway.
2680 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size differs for native (marshalled) vs managed layout. */
2684 n = mono_class_native_size (klass, &align);
2686 n = mono_class_value_size (klass, &align);
2688 /* if native is true there should be no references in the struct */
2689 if (cfg->gen_write_barriers && klass->has_references && !native) {
2690 /* Avoid barriers when storing to the stack */
2691 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2692 (dest->opcode == OP_LDADDR))) {
2693 int context_used = 0;
2698 if (cfg->generic_sharing_context)
2699 context_used = mono_class_check_context_used (klass);
2701 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2702 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2704 } else if (context_used) {
/* Shared generic code: load the klass argument from the rgctx. */
2705 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2707 if (cfg->compile_aot) {
2708 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2710 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before mono_value_copy uses it at runtime. */
2711 mono_class_compute_gc_descriptor (klass);
2715 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small copies are inlined, larger ones call the managed memcpy helper. */
2720 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2721 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2722 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2726 EMIT_NEW_ICONST (cfg, iargs [2], n);
2728 memcpy_method = get_memcpy_method ();
2729 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return the managed String.memset(ptr, value, n) helper from corlib,
 * caching it in a static on first use. Aborts if corlib lacks it.
 */
2734 get_memset_method (void)
2736 static MonoMethod *memset_method = NULL;
2737 if (!memset_method) {
2738 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2740 g_error ("Old corlib found. Install a new one");
2742 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg (the CIL initobj opcode). Small types are zeroed inline;
 * larger ones call the managed memset helper with value 0.
 */
2746 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2748 MonoInst *iargs [3];
2751 MonoMethod *memset_method;
2753 /* FIXME: Optimize this for the case when dest is an LDADDR */
2755 mono_class_init (klass);
2756 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheap enough to unroll inline. */
2758 if (n <= sizeof (gpointer) * 5) {
2759 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2762 memset_method = get_memset_method ();
2764 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2765 EMIT_NEW_ICONST (cfg, iargs [2], n);
2766 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR to load the runtime generic context for METHOD in shared generic
 * code. Depending on how the generic context is used, the rgctx comes from:
 * the method rgctx variable (generic methods), the vtable variable (static
 * methods / valuetype methods), or the 'this' object's vtable.
 */
2771 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2773 MonoInst *this = NULL;
/* Only meaningful when compiling shared generic code. */
2775 g_assert (cfg->generic_sharing_context);
2777 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2778 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2779 !method->klass->valuetype)
2780 EMIT_NEW_ARGLOAD (cfg, this, 0);
2782 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Generic method: the method rgctx is stored in the vtable variable slot. */
2783 MonoInst *mrgctx_loc, *mrgctx_var;
2786 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2788 mrgctx_loc = mono_get_vtable_var (cfg);
2789 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2792 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* No usable 'this': load the vtable from the dedicated variable. */
2793 MonoInst *vtable_loc, *vtable_var;
2797 vtable_loc = mono_get_vtable_var (cfg);
2798 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2800 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; indirect through it to get the vtable. */
2801 MonoInst *mrgctx_var = vtable_var;
2804 vtable_reg = alloc_preg (cfg);
2805 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2806 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable out of 'this'. */
2812 int vtable_reg, res_reg;
2814 vtable_reg = alloc_preg (cfg);
2815 res_reg = alloc_preg (cfg);
2816 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from the mempool MP) a patch-info entry describing an rgctx
 * fetch: which METHOD it belongs to, whether the context is a method rgctx
 * (IN_MRGCTX), the wrapped patch (PATCH_TYPE/PATCH_DATA) and the kind of
 * information to fetch (INFO_TYPE).
 */
2821 static MonoJumpInfoRgctxEntry *
2822 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2824 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2825 res->method = method;
2826 res->in_mrgctx = in_mrgctx;
2827 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2828 res->data->type = patch_type;
2829 res->data->data.target = patch_data;
2830 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the rgctx lazy-fetch trampoline, passing the rgctx
 * instruction as the only argument; ENTRY describes what to fetch.
 */
2835 static inline MonoInst*
2836 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2838 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR to fetch the RGCTX_TYPE information (e.g. klass or vtable) for
 * KLASS from the runtime generic context of the current method.
 */
2842 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2843 MonoClass *klass, int rgctx_type)
2845 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2846 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2848 return emit_rgctx_fetch (cfg, rgctx, entry);
2852 * emit_get_rgctx_method:
2854 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2855 * normal constants, else emit a load from the rgctx.
2858 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2859 MonoMethod *cmethod, int rgctx_type)
2861 if (!context_used) {
/* Non-shared code: the method is fully known, emit it as a constant. */
2864 switch (rgctx_type) {
2865 case MONO_RGCTX_INFO_METHOD:
2866 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2868 case MONO_RGCTX_INFO_METHOD_RGCTX:
2869 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2872 g_assert_not_reached ();
/* Shared code: fetch the info lazily through the rgctx trampoline. */
2875 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2876 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2878 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR to fetch the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
2883 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2884 MonoClassField *field, int rgctx_type)
2886 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2887 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2889 return emit_rgctx_fetch (cfg, rgctx, entry);
2893 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 * Emit a call to the generic class-init trampoline for KLASS. The vtable
 * argument comes from the rgctx under generic sharing, or is a vtable
 * constant otherwise; on architectures with a dedicated vtable register it
 * is passed in MONO_ARCH_VTABLE_REG.
 */
2896 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2898 MonoInst *vtable_arg;
2900 int context_used = 0;
2902 if (cfg->generic_sharing_context)
2903 context_used = mono_class_check_context_used (klass);
2906 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2907 klass, MONO_RGCTX_INFO_VTABLE);
2909 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2913 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
2916 if (COMPILE_LLVM (cfg))
2917 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2919 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2920 #ifdef MONO_ARCH_VTABLE_REG
2921 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2922 cfg->uses_vtable_reg = TRUE;
2929 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 * Emit a runtime check that OBJ is an array of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks). The comparison strategy depends on the compilation
 * mode: class pointer (shared/AOT), rgctx vtable (shared generics), or
 * a direct vtable comparison.
 */
2932 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2934 int vtable_reg = alloc_preg (cfg);
2935 int context_used = 0;
2937 if (cfg->generic_sharing_context)
2938 context_used = mono_class_check_context_used (array_class);
2940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2942 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code can't assume a single vtable per class; compare the class instead. */
2943 int class_reg = alloc_preg (cfg);
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2945 if (cfg->compile_aot) {
2946 int klass_reg = alloc_preg (cfg);
2947 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2948 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2950 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2952 } else if (context_used) {
2953 MonoInst *vtable_ins;
2955 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2956 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2958 if (cfg->compile_aot) {
2962 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2964 vt_reg = alloc_preg (cfg);
2965 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2966 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2969 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2975 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 * When --debug=casts is enabled, emit code that records the source class
 * (from OBJ_REG's vtable) and the target KLASS into the per-thread
 * MonoJitTlsData, so a failing cast can produce a detailed message.
 */
2979 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2981 if (mini_get_debug_options ()->better_cast_details) {
2982 int to_klass_reg = alloc_preg (cfg);
2983 int vtable_reg = alloc_preg (cfg);
2984 int klass_reg = alloc_preg (cfg);
2985 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Some platforms have no TLS intrinsic; warn instead of crashing. */
2988 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2992 MONO_ADD_INS (cfg->cbb, tls_get);
2993 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2996 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2997 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2998 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 * Counterpart to save_cast_details (): after a cast succeeds, clear the
 * recorded details so they don't leak into an unrelated later failure.
 */
3003 reset_cast_details (MonoCompile *cfg)
3005 /* Reset the variables holding the cast details */
3006 if (mini_get_debug_options ()->better_cast_details) {
3007 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3009 MONO_ADD_INS (cfg->cbb, tls_get);
3010 /* It is enough to reset the from field */
3011 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3016 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3017 * generic code is generated.
3020 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling the managed Nullable<T>.Unbox helper. */
3022 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3025 MonoInst *rgctx, *addr;
3027 /* FIXME: What if the class is shared? We might not
3028 have to get the address of the method from the
/* Shared code: look up the code address and rgctx, then call indirectly. */
3030 addr = emit_get_rgctx_method (cfg, context_used, method,
3031 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3033 rgctx = emit_get_rgctx (cfg, method, context_used);
3035 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3037 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 * Emit code for the CIL unbox opcode: verify that the object in sp[0] is a
 * boxed instance of KLASS (throwing InvalidCastException otherwise) and
 * return the address of the value payload (just past the MonoObject header).
 */
3042 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3046 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3047 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3048 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3049 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3051 obj_reg = sp [0]->dreg;
/* _FAULT variant also emits the implicit null check on the object. */
3052 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3053 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3055 /* FIXME: generics */
3056 g_assert (klass->rank == 0);
/* A boxed valuetype is never an array: reject rank != 0. */
3059 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3060 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3062 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3066 MonoInst *element_class;
3068 /* This assertion is from the unboxcast insn */
3069 g_assert (klass->rank == 0);
/* Shared generics: fetch the expected element class from the rgctx and compare. */
3071 element_class = emit_get_rgctx_klass (cfg, context_used,
3072 klass->element_class, MONO_RGCTX_INFO_KLASS);
3074 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3075 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3077 save_cast_details (cfg, klass->element_class, obj_reg);
3078 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3079 reset_cast_details (cfg);
/* Result: address of the value, i.e. obj + sizeof (MonoObject). */
3082 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3083 MONO_ADD_INS (cfg->cbb, add);
3084 add->type = STACK_MP;
3091 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 * Emit code to allocate an object of KLASS (FOR_BOX distinguishes box
 * allocations, which may use a specialized managed allocator). The
 * allocation strategy depends on the compilation mode: domain-shared code,
 * shared generics (klass/vtable from the rgctx), AOT corlib fast path,
 * GC managed allocator, or a plain runtime allocation function.
 */
3094 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3096 MonoInst *iargs [2];
3102 MonoInst *iargs [2];
3105 FIXME: we cannot get managed_alloc here because we can't get
3106 the class's vtable (because it's not a closed class)
3108 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3109 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared generics: decide whether the rgctx must yield the klass or the vtable. */
3112 if (cfg->opt & MONO_OPT_SHARED)
3113 rgctx_info = MONO_RGCTX_INFO_KLASS;
3115 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3116 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3118 if (cfg->opt & MONO_OPT_SHARED) {
3119 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3121 alloc_ftn = mono_object_new;
3124 alloc_ftn = mono_object_new_specific;
3127 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics paths below. */
3130 if (cfg->opt & MONO_OPT_SHARED) {
3131 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3132 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3134 alloc_ftn = mono_object_new;
3135 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3136 /* This happens often in argument checking code, eg. throw new FooException... */
3137 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3138 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3139 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3141 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3142 MonoMethod *managed_alloc = NULL;
/* Class failed to load: report a TypeLoadException through the cfg. */
3146 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3147 cfg->exception_ptr = klass;
3151 #ifndef MONO_CROSS_COMPILE
3152 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3155 if (managed_alloc) {
3156 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3157 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* pass_lw: some allocators take the instance size (in words) as first argument. */
3159 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3161 guint32 lw = vtable->klass->instance_size;
3162 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3163 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3164 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3167 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3171 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3175 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 * Emit code for the CIL box opcode: allocate a boxed object for KLASS and
 * copy VAL into its payload. Nullable<T> boxing is delegated to the managed
 * Nullable<T>.Box helper (indirectly via rgctx in shared generic code).
 */
3178 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3180 MonoInst *alloc, *ins;
3182 if (mono_class_is_nullable (klass)) {
3183 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3186 /* FIXME: What if the class is shared? We might not
3187 have to get the method address from the RGCTX. */
3188 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3189 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3190 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3192 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3194 return mono_emit_method_call (cfg, method, &val, NULL);
/* Generic path: allocate, then store the value just past the object header. */
3198 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * is_complex_isinst:
 * True when an isinst/castclass on KLASS cannot be handled by the simple
 * inline check and must go through an icall (interfaces, arrays, nullables,
 * remoting proxies, sealed/variant/open generic types). The leading TRUE
 * currently forces the icall path for every class — see the FIXME below.
 */
3207 // FIXME: This doesn't work yet (class libs tests fail?)
3208 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3211 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 * Emit code for the CIL castclass opcode: check that SRC is null or an
 * instance of KLASS, throwing InvalidCastException otherwise, and return
 * the (unchanged) object. Complex classes go through the
 * mono_object_castclass icall; simple ones get inline vtable/class checks.
 */
3214 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3216 MonoBasicBlock *is_null_bb;
3217 int obj_reg = src->dreg;
3218 int vtable_reg = alloc_preg (cfg);
3219 MonoInst *klass_inst = NULL;
3224 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3225 klass, MONO_RGCTX_INFO_KLASS);
3227 if (is_complex_isinst (klass)) {
3228 /* Complex case, handle by an icall */
3234 args [1] = klass_inst;
3236 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3238 /* Simple case, handled by the code below */
/* null always passes a castclass: branch straight to the merge block. */
3242 NEW_BBLOCK (cfg, is_null_bb);
3244 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3245 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3247 save_cast_details (cfg, klass, obj_reg);
3249 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3250 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3251 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3253 int klass_reg = alloc_preg (cfg);
3255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3257 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3258 /* the remoting code is broken, access the class for now */
3259 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3260 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3262 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3263 cfg->exception_ptr = klass;
/* Sealed class: a single vtable (or class) pointer comparison suffices. */
3266 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3271 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3278 MONO_START_BB (cfg, is_null_bb);
3280 reset_cast_details (cfg);
3286 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit code for the CIL isinst opcode: return SRC if it is null or an
 * instance of KLASS, NULL otherwise. Complex classes go through the
 * mono_object_isinst icall; otherwise a web of basic blocks performs the
 * checks inline (interface bitmap, array rank/element class, enum special
 * cases, sealed-class vtable compare, or the generic class hierarchy walk).
 */
3289 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3292 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3293 int obj_reg = src->dreg;
3294 int vtable_reg = alloc_preg (cfg);
3295 int res_reg = alloc_preg (cfg);
3296 MonoInst *klass_inst = NULL;
3299 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3301 if (is_complex_isinst (klass)) {
3304 /* Complex case, handle by an icall */
3310 args [1] = klass_inst;
3312 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3314 /* Simple case, the code below can handle it */
/* is_null_bb doubles as the "success" target: the result already holds obj. */
3318 NEW_BBLOCK (cfg, is_null_bb);
3319 NEW_BBLOCK (cfg, false_bb);
3320 NEW_BBLOCK (cfg, end_bb);
3322 /* Do the assignment at the beginning, so the other assignment can be if converted */
3323 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3324 ins->type = STACK_OBJ;
3327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3330 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3332 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3333 g_assert (!context_used);
3334 /* the is_null_bb target simply copies the input register to the output */
3335 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3337 int klass_reg = alloc_preg (cfg);
/* Array case: match the rank, then check the element class. */
3340 int rank_reg = alloc_preg (cfg);
3341 int eclass_reg = alloc_preg (cfg);
3343 g_assert (!context_used);
3344 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3349 if (klass->cast_class == mono_defaults.object_class) {
/* object[]: accept anything that isn't an enum-typed array element. */
3350 int parent_reg = alloc_preg (cfg);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3352 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3353 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3355 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3356 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3357 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3359 } else if (klass->cast_class == mono_defaults.enum_class) {
3360 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3362 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3363 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3365 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3366 /* Check that the object is a vector too */
/* SZARRAY has no bounds descriptor; a non-NULL bounds means a multi-dim array. */
3367 int bounds_reg = alloc_preg (cfg);
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3369 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3370 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3373 /* the is_null_bb target simply copies the input register to the output */
3374 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3376 } else if (mono_class_is_nullable (klass)) {
3377 g_assert (!context_used);
3378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3379 /* the is_null_bb target simply copies the input register to the output */
3380 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3382 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3383 g_assert (!context_used);
3384 /* the remoting code is broken, access the class for now */
3385 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3386 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3388 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3389 cfg->exception_ptr = klass;
3392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3397 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3401 /* the is_null_bb target simply copies the input register to the output */
3402 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: the result is NULL. */
3407 MONO_START_BB (cfg, false_bb);
3409 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3412 MONO_START_BB (cfg, is_null_bb);
3414 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit code for the remoting-aware isinst variant (OP_CISINST). Unlike
 * handle_isinst, the result is a small integer so the caller can
 * distinguish the "unknown proxy type" case — see the comment below.
 * Transparent proxies with custom type info fall into the "2" bucket so
 * the check can be completed at runtime.
 */
3420 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3422 /* This opcode takes as input an object reference and a class, and returns:
3423 0) if the object is an instance of the class,
3424 1) if the object is not instance of the class,
3425 2) if the object is a proxy whose type cannot be determined */
3428 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3429 int obj_reg = src->dreg;
3430 int dreg = alloc_ireg (cfg);
3432 int klass_reg = alloc_preg (cfg);
3434 NEW_BBLOCK (cfg, true_bb);
3435 NEW_BBLOCK (cfg, false_bb);
3436 NEW_BBLOCK (cfg, false2_bb);
3437 NEW_BBLOCK (cfg, end_bb);
3438 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is not an instance: result 1. */
3440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3443 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3444 NEW_BBLOCK (cfg, interface_fail_bb);
3446 tmp_reg = alloc_preg (cfg);
3447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3448 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3449 MONO_START_BB (cfg, interface_fail_bb);
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still qualify. */
3452 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3454 tmp_reg = alloc_preg (cfg);
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3457 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface class: decide whether obj is a transparent proxy. */
3459 tmp_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3463 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3464 tmp_reg = alloc_preg (cfg);
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3468 tmp_reg = alloc_preg (cfg);
3469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3470 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3473 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3476 MONO_START_BB (cfg, no_proxy_bb);
3478 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge: materialize the 0/1/2 result in dreg. */
3481 MONO_START_BB (cfg, false_bb);
3483 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3486 MONO_START_BB (cfg, false2_bb);
3488 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3491 MONO_START_BB (cfg, true_bb);
3493 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3495 MONO_START_BB (cfg, end_bb);
3498 MONO_INST_NEW (cfg, ins, OP_ICONST);
3500 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a castclass with transparent-proxy (remoting) support.
 * DREG receives 0 on a successful cast and 1 when the object is a proxy
 * whose type cannot be determined here; other failures raise
 * InvalidCastException (see the original comment below).
 */
3506 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3508 /* This opcode takes as input an object reference and a class, and returns:
3509 0) if the object is an instance of the class,
3510 1) if the object is a proxy whose type cannot be determined
3511 an InvalidCastException exception is thrown otherwise*/
3514 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3515 int obj_reg = src->dreg;
3516 int dreg = alloc_ireg (cfg);
3517 int tmp_reg = alloc_preg (cfg);
3518 int klass_reg = alloc_preg (cfg);
3520 NEW_BBLOCK (cfg, end_bb);
3521 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record details for a helpful InvalidCastException message. */
3526 save_cast_details (cfg, klass, obj_reg);
3528 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3529 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: test the vtable's interface map first. */
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3532 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3533 MONO_START_BB (cfg, interface_fail_bb);
3534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy may still be acceptable. */
3536 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3538 tmp_reg = alloc_preg (cfg);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info))
3540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3541 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: report "type cannot be determined" (1). */
3543 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3547 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: load the object's class and check for a proxy. */
3549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3551 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: cast against the remote proxy class instead. */
3553 tmp_reg = alloc_preg (cfg);
3554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3557 tmp_reg = alloc_preg (cfg);
3558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3562 NEW_BBLOCK (cfg, fail_1_bb);
3564 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3566 MONO_START_BB (cfg, fail_1_bb);
/* Proxy whose type cannot be determined here: result 1, no exception. */
3568 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3571 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failed cast throws from inside mini_emit_castclass. */
3573 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3576 MONO_START_BB (cfg, ok_result_bb);
3578 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3580 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack. */
3583 MONO_INST_NEW (cfg, ins, OP_ICONST);
3585 ins->type = STACK_I4;
3591 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR for a delegate constructor: allocate the delegate object,
 * store its target/method/invoke_impl fields and, when possible, a per-domain
 * code slot that the delegate trampoline fills in once METHOD is compiled.
 */
3593 static G_GNUC_UNUSED MonoInst*
3594 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3596 gpointer *trampoline;
3597 MonoInst *obj, *method_ins, *tramp_ins;
3601 obj = handle_alloc (cfg, klass, FALSE, 0);
3605 /* Inline the contents of mono_delegate_ctor */
3607 /* Set target field */
3608 /* Optimize away setting of NULL target */
3609 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3612 /* Set method field */
3613 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3614 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3617 * To avoid looking up the compiled code belonging to the target method
3618 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3619 * store it, and we fill it after the method has been compiled.
/* The code-slot optimization is skipped when compiling AOT and for dynamic methods. */
3621 if (!cfg->compile_aot && !method->dynamic) {
3622 MonoInst *code_slot_ins;
3625 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* method_code_hash is per-domain shared state: touch it only under the domain lock. */
3627 domain = mono_domain_get ();
3628 mono_domain_lock (domain);
3629 if (!domain_jit_info (domain)->method_code_hash)
3630 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3631 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3633 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3634 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3636 mono_domain_unlock (domain);
3638 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3640 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3643 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patch-time constant, not a pointer. */
3644 if (cfg->compile_aot) {
3645 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3647 trampoline = mono_create_delegate_trampoline (klass);
3648 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3652 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall to allocate a RANK-dimensional
 * array, with the dimension arguments taken from SP.
 */
3658 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3660 MonoJitICallInfo *info;
3662 /* Need to register the icall so it gets an icall wrapper */
3663 info = mono_get_array_new_va_icall (rank);
3665 cfg->flags |= MONO_CFG_HAS_VARARGS;
3667 /* mono_array_new_va () needs a vararg calling convention */
3668 cfg->disable_llvm = TRUE;
3670 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3671 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var.  Idempotent: bails out if there is no got_var or
 * it has already been allocated (got_var_allocated flag).
 */
3675 mono_emit_load_got_addr (MonoCompile *cfg)
3677 MonoInst *getaddr, *dummy_use;
3679 if (!cfg->got_var || cfg->got_var_allocated)
3682 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3683 getaddr->dreg = cfg->got_var->dreg;
3685 /* Add it to the start of the first bblock */
/* If bb_entry already has code, prepend manually; otherwise append normally. */
3686 if (cfg->bb_entry->code) {
3687 getaddr->next = cfg->bb_entry->code;
3688 cfg->bb_entry->code = getaddr;
3691 MONO_ADD_INS (cfg->bb_entry, getaddr);
3693 cfg->got_var_allocated = TRUE;
3696 * Add a dummy use to keep the got_var alive, since real uses might
3697 * only be generated by the back ends.
3698 * Add it to end_bblock, so the variable's lifetime covers the whole
3700 * It would be better to make the usage of the got var explicit in all
3701 * cases when the backend needs it (i.e. calls, throw etc.), so this
3702 * wouldn't be needed.
3704 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3705 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit: MONO_INLINELIMIT env var or INLINE_LENGTH_LIMIT.
 * Lazily initialized in mono_method_check_inlining (). */
3708 static int inline_limit;
3709 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled.  Rejects (among the cases visible here): generic sharing,
 * excessive inline depth, noinline/synchronized/marshalbyref methods,
 * bodies at or above the size limit, classes whose cctor would have to run
 * inside the inlined code, methods with declarative security, and (under
 * MONO_ARCH_SOFT_FLOAT) methods with R4 parameters or return values.
 */
3712 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3714 MonoMethodHeaderSummary header;
3716 #ifdef MONO_ARCH_SOFT_FLOAT
3717 MonoMethodSignature *sig = mono_method_signature (method);
3721 if (cfg->generic_sharing_context)
3724 if (cfg->inline_depth > 10)
3727 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke stubs can be inlined too. */
3728 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3729 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3730 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3735 if (!mono_method_get_header_summary (method, &header))
3738 /*runtime, icall and pinvoke are checked by summary call*/
3739 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3740 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3741 (method->klass->marshalbyref) ||
3745 /* also consider num_locals? */
3746 /* Do the size check early to avoid creating vtables */
/* Lazily read MONO_INLINELIMIT; fall back to INLINE_LENGTH_LIMIT. */
3747 if (!inline_limit_inited) {
3748 if (getenv ("MONO_INLINELIMIT"))
3749 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3751 inline_limit = INLINE_LENGTH_LIMIT;
3752 inline_limit_inited = TRUE;
3754 if (header.code_size >= inline_limit)
3758 * if we can initialize the class of the method right away, we do,
3759 * otherwise we don't allow inlining if the class needs initialization,
3760 * since it would mean inserting a call to mono_runtime_class_init()
3761 * inside the inlined code
3763 if (!(cfg->opt & MONO_OPT_SHARED)) {
3764 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3765 if (cfg->run_cctors && method->klass->has_cctor) {
3766 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3767 if (!method->klass->runtime_info)
3768 /* No vtable created yet */
3770 vtable = mono_class_vtable (cfg->domain, method->klass);
3773 /* This makes so that inline cannot trigger */
3774 /* .cctors: too many apps depend on them */
3775 /* running with a specific order... */
3776 if (! vtable->initialized)
3778 mono_runtime_class_init (vtable);
3780 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3781 if (!method->klass->runtime_info)
3782 /* No vtable created yet */
3784 vtable = mono_class_vtable (cfg->domain, method->klass);
3787 if (!vtable->initialized)
3792 * If we're compiling for shared code
3793 * the cctor will need to be run at aot method load time, for example,
3794 * or at the end of the compilation of the inlining method.
3796 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3801 * CAS - do not inline methods with declarative security
3802 * Note: this has to be before any possible return TRUE;
3804 if (mono_method_has_declsec (method))
3807 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods touching R4 values. */
3809 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3811 for (i = 0; i < sig->param_count; ++i)
3812 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access compiled in METHOD requires the
 * class constructor of VTABLE's class to be triggered first.  Already
 * initialized vtables (outside AOT), BeforeFieldInit classes, classes whose
 * cctor does not need to run, and instance methods of the class itself are
 * exempt.
 */
3820 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3822 if (vtable->initialized && !cfg->compile_aot)
3825 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3828 if (!mono_class_needs_cctor_run (vtable->klass, method))
3831 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3832 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  BCHECK requests a bounds check.
 * On x86/amd64, power-of-two element sizes use a single LEA.
 */
3839 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3843 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3845 mono_class_init (klass);
3846 size = mono_class_array_element_size (klass);
3848 mult_reg = alloc_preg (cfg);
3849 array_reg = arr->dreg;
3850 index_reg = index->dreg;
3852 #if SIZEOF_REGISTER == 8
3853 /* The array reg is 64 bits but the index reg is only 32 */
3854 if (COMPILE_LLVM (cfg)) {
3856 index2_reg = index_reg;
3858 index2_reg = alloc_preg (cfg);
3859 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32 bit registers: narrow an I8 index down to I4. */
3862 if (index->type == STACK_I8) {
3863 index2_reg = alloc_preg (cfg);
3864 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3866 index2_reg = index_reg;
3871 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3873 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale and vector offset into one LEA. */
3874 if (size == 1 || size == 2 || size == 4 || size == 8) {
3875 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3877 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3878 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3884 add_reg = alloc_preg (cfg);
3886 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3887 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3888 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3889 ins->type = STACK_PTR;
3890 MONO_ADD_INS (cfg->cbb, ins);
3895 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 * array: both indexes are rebased against the per-dimension lower bound,
 * range-checked against the dimension length, then combined as
 * (i' * len2 + j') * size + offsetof (MonoArray, vector).
 * Requires real multiply opcodes (hence the MONO_ARCH_EMULATE_MUL_DIV guard).
 */
3897 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3899 int bounds_reg = alloc_preg (cfg);
3900 int add_reg = alloc_preg (cfg);
3901 int mult_reg = alloc_preg (cfg);
3902 int mult2_reg = alloc_preg (cfg);
3903 int low1_reg = alloc_preg (cfg);
3904 int low2_reg = alloc_preg (cfg);
3905 int high1_reg = alloc_preg (cfg);
3906 int high2_reg = alloc_preg (cfg);
3907 int realidx1_reg = alloc_preg (cfg);
3908 int realidx2_reg = alloc_preg (cfg);
3909 int sum_reg = alloc_preg (cfg);
3914 mono_class_init (klass);
3915 size = mono_class_array_element_size (klass);
3917 index1 = index_ins1->dreg;
3918 index2 = index_ins2->dreg;
3920 /* range checking */
3921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3922 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: rebase against lower_bound, compare unsigned to length. */
3924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3925 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3926 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3928 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3929 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3930 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry lives sizeof (MonoArrayBounds) further on. */
3932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3933 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3934 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3935 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3936 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3938 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize: ((i' * len2) + j') * size + vector offset. */
3940 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3941 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3943 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3944 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3946 ins->type = STACK_MP;
3948 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set accessor
 * CMETHOD.  Rank-1 and (when intrinsics are enabled) rank-2 arrays get
 * inline IR; higher ranks call the generated array-address wrapper.
 */
3955 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3959 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index. */
3962 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3965 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3967 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3968 /* emit_ldelema_2 depends on OP_LMUL */
3969 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3970 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the marshal-generated address helper. */
3974 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3975 addr_method = mono_marshal_get_array_address (rank, element_size);
3976 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3981 static MonoBreakPolicy
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
3982 always_insert_breakpoint (MonoMethod *method)
3984 return MONO_BREAK_POLICY_ALWAYS;
/* Current break policy callback; replaced via mono_set_break_policy (). */
3987 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3990 * mono_set_break_policy:
3991 * policy_callback: the new callback function
3993 * Allow embedders to decide whether to actually obey breakpoint instructions
3994 * (both break IL instructions and Debugger.Break () method calls), for example
3995 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3996 * untrusted or semi-trusted code.
3998 * @policy_callback will be called every time a break point instruction needs to
3999 * be inserted with the method argument being the method that calls Debugger.Break()
4000 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4001 * if it wants the breakpoint to not be effective in the given method.
4002 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always_insert_breakpoint).  See the doc comment above. */
4005 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4007 if (policy_callback)
4008 break_policy_func = policy_callback;
4010 break_policy_func = always_insert_breakpoint;
/* Ask the installed break policy whether a breakpoint in METHOD should be
 * emitted.  (Name typo "brekpoint" kept: the function is referenced below.) */
4014 should_insert_brekpoint (MonoMethod *method) {
4015 switch (break_policy_func (method)) {
4016 case MONO_BREAK_POLICY_ALWAYS:
4018 case MONO_BREAK_POLICY_NEVER:
4020 case MONO_BREAK_POLICY_ON_DBG:
4021 return mono_debug_using_mono_debugger ();
/* Unknown policy value from an embedder callback: warn (fall-through default). */
4023 g_warning ("Incorrect value returned from break policy callback");
4028 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl/SetGenericValueImpl: compute the element
 * address, then copy between the element and the value pointed to by args [2].
 * IS_SET selects the store direction.
 */
4030 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4032 MonoInst *addr, *store, *load;
4033 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4035 /* the bounds check is already done by the callers */
4036 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element; otherwise: array element -> value. */
4038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4041 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4042 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit intrinsic IR for a constructor call; currently only SIMD
 * ctors (when MONO_OPT_SIMD is enabled) are handled.
 */
4048 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4050 MonoInst *ins = NULL;
4051 #ifdef MONO_ARCH_SIMD_INTRINSICS
4052 if (cfg->opt & MONO_OPT_SIMD) {
4053 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Replace calls to well-known corlib methods (String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Environment,
 * Math) with intrinsic IR.  Falls through to the arch-specific handler;
 * a NULL result means "no intrinsic, emit a normal call".
 */
4063 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4065 MonoInst *ins = NULL;
4067 static MonoClass *runtime_helpers_class = NULL;
4068 if (! runtime_helpers_class)
4069 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4070 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4072 if (cmethod->klass == mono_defaults.string_class) {
4073 if (strcmp (cmethod->name, "get_Chars") == 0) {
4074 int dreg = alloc_ireg (cfg);
4075 int index_reg = alloc_preg (cfg);
4076 int mult_reg = alloc_preg (cfg);
4077 int add_reg = alloc_preg (cfg);
4079 #if SIZEOF_REGISTER == 8
4080 /* The array reg is 64 bits but the index reg is only 32 */
4081 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4083 index_reg = args [1]->dreg;
4085 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4087 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4088 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4089 add_reg = ins->dreg;
4090 /* Avoid a warning */
4092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4096 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4098 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4100 type_from_op (ins, NULL, NULL);
4102 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4103 int dreg = alloc_ireg (cfg);
4104 /* Decompose later to allow more optimizations */
4105 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4106 ins->type = STACK_I4;
4107 cfg->cbb->has_array_access = TRUE;
4108 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4111 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4112 int mult_reg = alloc_preg (cfg);
4113 int add_reg = alloc_preg (cfg);
4115 /* The corlib functions check for oob already. */
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4117 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4118 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4119 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4122 } else if (cmethod->klass == mono_defaults.object_class) {
4124 if (strcmp (cmethod->name, "GetType") == 0) {
4125 int dreg = alloc_preg (cfg);
4126 int vt_reg = alloc_preg (cfg);
4127 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4128 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4129 type_from_op (ins, NULL, NULL);
4132 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Address-based hash is only valid with a non-moving GC. */
4133 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4134 int dreg = alloc_ireg (cfg);
4135 int t1 = alloc_ireg (cfg);
4137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4138 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4139 ins->type = STACK_I4;
4143 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4144 MONO_INST_NEW (cfg, ins, OP_NOP);
4145 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4149 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4150 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4151 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4152 if (cmethod->name [0] != 'g')
4155 if (strcmp (cmethod->name, "get_Rank") == 0) {
4156 int dreg = alloc_ireg (cfg);
4157 int vtable_reg = alloc_preg (cfg);
4158 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4159 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4160 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4161 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4162 type_from_op (ins, NULL, NULL);
4165 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4166 int dreg = alloc_ireg (cfg);
4168 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4169 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4170 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4175 } else if (cmethod->klass == runtime_helpers_class) {
4177 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4178 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4182 } else if (cmethod->klass == mono_defaults.thread_class) {
4183 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4184 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4185 MONO_ADD_INS (cfg->cbb, ins);
4187 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4188 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4189 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4192 } else if (cmethod->klass == mono_defaults.monitor_class) {
4193 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4194 if (strcmp (cmethod->name, "Enter") == 0) {
4197 if (COMPILE_LLVM (cfg)) {
4199 * Pass the argument normally, the LLVM backend will handle the
4200 * calling convention problems.
4202 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4204 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4205 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4206 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4207 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4210 return (MonoInst*)call;
4211 } else if (strcmp (cmethod->name, "Exit") == 0) {
4214 if (COMPILE_LLVM (cfg)) {
4215 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4217 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4218 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4219 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4220 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4223 return (MonoInst*)call;
4225 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4226 MonoMethod *fast_method = NULL;
4228 /* Avoid infinite recursion */
4229 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4230 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4231 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4234 if (strcmp (cmethod->name, "Enter") == 0 ||
4235 strcmp (cmethod->name, "Exit") == 0)
4236 fast_method = mono_monitor_get_fast_path (cmethod);
4240 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4242 } else if (cmethod->klass->image == mono_defaults.corlib &&
4243 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4244 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4247 #if SIZEOF_REGISTER == 8
4248 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4249 /* 64 bit reads are already atomic */
4250 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4251 ins->dreg = mono_alloc_preg (cfg);
4252 ins->inst_basereg = args [0]->dreg;
4253 ins->inst_offset = 0;
4254 MONO_ADD_INS (cfg->cbb, ins);
4258 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all lower to OP_ATOMIC_ADD_NEW_* with the
 * appropriate constant or supplied addend. */
4259 if (strcmp (cmethod->name, "Increment") == 0) {
4260 MonoInst *ins_iconst;
4263 if (fsig->params [0]->type == MONO_TYPE_I4)
4264 opcode = OP_ATOMIC_ADD_NEW_I4;
4265 #if SIZEOF_REGISTER == 8
4266 else if (fsig->params [0]->type == MONO_TYPE_I8)
4267 opcode = OP_ATOMIC_ADD_NEW_I8;
4270 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4271 ins_iconst->inst_c0 = 1;
4272 ins_iconst->dreg = mono_alloc_ireg (cfg);
4273 MONO_ADD_INS (cfg->cbb, ins_iconst);
4275 MONO_INST_NEW (cfg, ins, opcode);
4276 ins->dreg = mono_alloc_ireg (cfg);
4277 ins->inst_basereg = args [0]->dreg;
4278 ins->inst_offset = 0;
4279 ins->sreg2 = ins_iconst->dreg;
4280 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4281 MONO_ADD_INS (cfg->cbb, ins);
4283 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4284 MonoInst *ins_iconst;
4287 if (fsig->params [0]->type == MONO_TYPE_I4)
4288 opcode = OP_ATOMIC_ADD_NEW_I4;
4289 #if SIZEOF_REGISTER == 8
4290 else if (fsig->params [0]->type == MONO_TYPE_I8)
4291 opcode = OP_ATOMIC_ADD_NEW_I8;
4294 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4295 ins_iconst->inst_c0 = -1;
4296 ins_iconst->dreg = mono_alloc_ireg (cfg);
4297 MONO_ADD_INS (cfg->cbb, ins_iconst);
4299 MONO_INST_NEW (cfg, ins, opcode);
4300 ins->dreg = mono_alloc_ireg (cfg);
4301 ins->inst_basereg = args [0]->dreg;
4302 ins->inst_offset = 0;
4303 ins->sreg2 = ins_iconst->dreg;
4304 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4305 MONO_ADD_INS (cfg->cbb, ins);
4307 } else if (strcmp (cmethod->name, "Add") == 0) {
4310 if (fsig->params [0]->type == MONO_TYPE_I4)
4311 opcode = OP_ATOMIC_ADD_NEW_I4;
4312 #if SIZEOF_REGISTER == 8
4313 else if (fsig->params [0]->type == MONO_TYPE_I8)
4314 opcode = OP_ATOMIC_ADD_NEW_I8;
4318 MONO_INST_NEW (cfg, ins, opcode);
4319 ins->dreg = mono_alloc_ireg (cfg);
4320 ins->inst_basereg = args [0]->dreg;
4321 ins->inst_offset = 0;
4322 ins->sreg2 = args [1]->dreg;
4323 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4324 MONO_ADD_INS (cfg->cbb, ins);
4327 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4329 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4330 if (strcmp (cmethod->name, "Exchange") == 0) {
4332 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4334 if (fsig->params [0]->type == MONO_TYPE_I4)
4335 opcode = OP_ATOMIC_EXCHANGE_I4;
4336 #if SIZEOF_REGISTER == 8
4337 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4338 (fsig->params [0]->type == MONO_TYPE_I))
4339 opcode = OP_ATOMIC_EXCHANGE_I8;
4341 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4342 opcode = OP_ATOMIC_EXCHANGE_I4;
4347 MONO_INST_NEW (cfg, ins, opcode);
4348 ins->dreg = mono_alloc_ireg (cfg);
4349 ins->inst_basereg = args [0]->dreg;
4350 ins->inst_offset = 0;
4351 ins->sreg2 = args [1]->dreg;
4352 MONO_ADD_INS (cfg->cbb, ins);
4354 switch (fsig->params [0]->type) {
4356 ins->type = STACK_I4;
4360 ins->type = STACK_I8;
4362 case MONO_TYPE_OBJECT:
4363 ins->type = STACK_OBJ;
4366 g_assert_not_reached ();
/* An exchanged reference needs a write barrier for generational GCs. */
4369 if (cfg->gen_write_barriers && is_ref) {
4370 MonoInst *dummy_use;
4371 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4372 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4373 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4376 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4378 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4379 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4381 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4382 if (fsig->params [1]->type == MONO_TYPE_I4)
4384 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4385 size = sizeof (gpointer);
4386 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4389 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4390 ins->dreg = alloc_ireg (cfg);
4391 ins->sreg1 = args [0]->dreg;
4392 ins->sreg2 = args [1]->dreg;
4393 ins->sreg3 = args [2]->dreg;
4394 ins->type = STACK_I4;
4395 MONO_ADD_INS (cfg->cbb, ins);
4396 } else if (size == 8) {
4397 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4398 ins->dreg = alloc_ireg (cfg);
4399 ins->sreg1 = args [0]->dreg;
4400 ins->sreg2 = args [1]->dreg;
4401 ins->sreg3 = args [2]->dreg;
4402 ins->type = STACK_I8;
4403 MONO_ADD_INS (cfg->cbb, ins);
4405 /* g_assert_not_reached (); */
4407 if (cfg->gen_write_barriers && is_ref) {
4408 MonoInst *dummy_use;
4409 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4410 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4411 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4414 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Other corlib methods (Debugger.Break, Environment) ---- */
4418 } else if (cmethod->klass->image == mono_defaults.corlib) {
4419 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4420 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4421 if (should_insert_brekpoint (cfg->method))
4422 MONO_INST_NEW (cfg, ins, OP_BREAK);
4424 MONO_INST_NEW (cfg, ins, OP_NOP);
4425 MONO_ADD_INS (cfg->cbb, ins);
4428 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4429 && strcmp (cmethod->klass->name, "Environment") == 0) {
4431 EMIT_NEW_ICONST (cfg, ins, 1);
4433 EMIT_NEW_ICONST (cfg, ins, 0);
4437 } else if (cmethod->klass == mono_defaults.math_class) {
4439 * There is general branches code for Min/Max, but it does not work for
4441 * http://everything2.com/?node_id=1051618
4445 #ifdef MONO_ARCH_SIMD_INTRINSICS
4446 if (cfg->opt & MONO_OPT_SIMD) {
4447 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No generic intrinsic matched: give the backend a chance. */
4453 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4457 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to a different implementation.  Currently only
 * String.InternalAllocateStr is redirected, to the GC's managed string
 * allocator (skipped while allocation profiling is active).
 */
4460 inline static MonoInst*
4461 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4462 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4464 if (method->klass == mono_defaults.string_class) {
4465 /* managed string allocation support */
4466 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4467 MonoInst *iargs [2];
4468 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4469 MonoMethod *managed_alloc = NULL;
4471 g_assert (vtable); /*Should not fail since it is System.String*/
4472 #ifndef MONO_CROSS_COMPILE
4473 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (vtable, length). */
4477 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4478 iargs [1] = args [0];
4479 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for each argument (including the
 * implicit this) and store the caller's stack values SP into them, filling
 * cfg->args for the inlined body.
 */
4486 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4488 MonoInst *store, *temp;
4491 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is the implicit this when present; its type comes from the stack. */
4492 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4495 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4496 * would be different than the MonoInst's used to represent arguments, and
4497 * the ldelema implementation can't deal with that.
4498 * Solution: When ldelema is used on an inline argument, create a var for
4499 * it, emit ldelema on that var, and emit the saving code below in
4500 * inline_method () if needed.
4502 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4503 cfg->args [i] = temp;
4504 /* This uses cfg->args [i] which is set by the preceding line */
4505 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4506 store->cil_code = sp [0]->cil_code;
4511 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4512 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4514 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of callees whose full name starts with
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (cached on first use;
 * an empty/unset limit allows everything).
 */
4516 check_inline_called_method_name_limit (MonoMethod *called_method)
4519 static char *limit = NULL;
4521 if (limit == NULL) {
4522 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4524 if (limit_string != NULL)
4525 limit = limit_string;
4527 limit = (char *) "";
4530 if (limit [0] != '\0') {
4531 char *called_method_name = mono_method_full_name (called_method, TRUE);
4533 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4534 g_free (called_method_name);
4536 //return (strncmp_result <= 0);
4537 return (strncmp_result == 0);
4544 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit () for the CALLER side:
 *   returns TRUE only when the caller's full name starts with the prefix in
 *   $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
4546 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4549 static char *limit = NULL;
/* Read and cache the environment variable on first use. */
4551 if (limit == NULL) {
4552 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4553 if (limit_string != NULL) {
4554 limit = limit_string;
4556 limit = (char *) "";
/* Non-empty limit: accept only callers whose full name begins with it. */
4560 if (limit [0] != '\0') {
4561 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4563 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4564 g_free (caller_method_name);
4566 //return (strncmp_result <= 0);
4567 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point.  Saves the pieces of
 *   CFG state that mono_method_to_ir () overwrites, recursively converts the
 *   callee's IL into IR between a fresh start/end bblock pair, restores the
 *   saved state, and either stitches the new bblocks into the caller's graph
 *   (success) or discards them (abort).  Returns value elided in this view —
 *   NOTE(review): presumably the inline cost on success, 0 on failure; confirm
 *   against the full source.
 */
4575 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4576 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4578 MonoInst *ins, *rvar = NULL;
4579 MonoMethodHeader *cheader;
4580 MonoBasicBlock *ebblock, *sbblock;
/* prev_* variables snapshot the CFG fields clobbered by the recursive
 * mono_method_to_ir () call so they can be restored afterwards. */
4582 MonoMethod *prev_inlined_method;
4583 MonoInst **prev_locals, **prev_args;
4584 MonoType **prev_arg_types;
4585 guint prev_real_offset;
4586 GHashTable *prev_cbb_hash;
4587 MonoBasicBlock **prev_cil_offset_to_bb;
4588 MonoBasicBlock *prev_cbb;
4589 unsigned char* prev_cil_start;
4590 guint32 prev_cil_offset_to_bb_len;
4591 MonoMethod *prev_current_method;
4592 MonoGenericContext *prev_generic_context;
4593 gboolean ret_var_set, prev_ret_var_set;
4595 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters: bail out unless callee/caller names match the
 * MONO_INLINE_*_METHOD_NAME_LIMIT env-var prefixes (unless forced). */
4597 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4598 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4601 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4602 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4606 if (cfg->verbose_level > 2)
4607 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once in the stats. */
4609 if (!cmethod->inline_info) {
4610 mono_jit_stats.inlineable_methods++;
4611 cmethod->inline_info = 1;
4614 /* allocate local variables */
4615 cheader = mono_method_get_header (cmethod);
4617 if (cheader == NULL || mono_loader_get_last_error ()) {
4619 mono_metadata_free_mh (cheader);
4620 mono_loader_clear_error ();
4624 /* allocate space to store the return value */
4625 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4626 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Point cfg->locals at a fresh array for the callee's locals. */
4630 prev_locals = cfg->locals;
4631 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4632 for (i = 0; i < cheader->num_locals; ++i)
4633 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4635 /* allocate start and end blocks */
4636 /* This is needed so if the inline is aborted, we can clean up */
4637 NEW_BBLOCK (cfg, sbblock);
4638 sbblock->real_offset = real_offset;
4640 NEW_BBLOCK (cfg, ebblock);
4641 ebblock->block_num = cfg->num_bblocks++;
4642 ebblock->real_offset = real_offset;
/* Save the remaining CFG state that the recursive IR conversion mutates. */
4644 prev_args = cfg->args;
4645 prev_arg_types = cfg->arg_types;
4646 prev_inlined_method = cfg->inlined_method;
4647 cfg->inlined_method = cmethod;
4648 cfg->ret_var_set = FALSE;
4649 cfg->inline_depth ++;
4650 prev_real_offset = cfg->real_offset;
4651 prev_cbb_hash = cfg->cbb_hash;
4652 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4653 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4654 prev_cil_start = cfg->cil_start;
4655 prev_cbb = cfg->cbb;
4656 prev_current_method = cfg->current_method;
4657 prev_generic_context = cfg->generic_context;
4658 prev_ret_var_set = cfg->ret_var_set;
/* Recursively convert the callee's IL; costs < 0 signals failure. */
4660 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4662 ret_var_set = cfg->ret_var_set;
/* Restore every saved field so the caller's conversion can continue. */
4664 cfg->inlined_method = prev_inlined_method;
4665 cfg->real_offset = prev_real_offset;
4666 cfg->cbb_hash = prev_cbb_hash;
4667 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4668 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4669 cfg->cil_start = prev_cil_start;
4670 cfg->locals = prev_locals;
4671 cfg->args = prev_args;
4672 cfg->arg_types = prev_arg_types;
4673 cfg->current_method = prev_current_method;
4674 cfg->generic_context = prev_generic_context;
4675 cfg->ret_var_set = prev_ret_var_set;
4676 cfg->inline_depth --;
/* Accept the inline when it was cheap enough (cost < 60) or forced. */
4678 if ((costs >= 0 && costs < 60) || inline_allways) {
4679 if (cfg->verbose_level > 2)
4680 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4682 mono_jit_stats.inlined_methods++;
4684 /* always add some code to avoid block split failures */
4685 MONO_INST_NEW (cfg, ins, OP_NOP);
4686 MONO_ADD_INS (prev_cbb, ins);
/* Splice the inlined start block after the current block. */
4688 prev_cbb->next_bb = sbblock;
4689 link_bblock (cfg, prev_cbb, sbblock);
4692 * Get rid of the begin and end bblocks if possible to aid local
4695 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4697 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4698 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4700 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4701 MonoBasicBlock *prev = ebblock->in_bb [0];
4702 mono_merge_basic_blocks (cfg, prev, ebblock);
4704 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4705 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4706 cfg->cbb = prev_cbb;
4714 * If the inlined method contains only a throw, then the ret var is not
4715 * set, so set it to a dummy value.
4718 static double r8_0 = 0.0;
/* Emit a type-appropriate zero constant into the unset return var. */
4720 switch (rvar->type) {
4722 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4725 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4730 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4733 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4734 ins->type = STACK_R8;
4735 ins->inst_p0 = (void*)&r8_0;
4736 ins->dreg = rvar->dreg;
4737 MONO_ADD_INS (cfg->cbb, ins);
4740 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4743 g_assert_not_reached ();
/* Load the return value back onto the evaluation stack. */
4747 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header ownership moves to the compile; freed when the compile is freed. */
4750 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: reset exception state and drop the speculative bblocks. */
4753 if (cfg->verbose_level > 2)
4754 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4755 cfg->exception_type = MONO_EXCEPTION_NONE;
4756 mono_loader_clear_error ();
4758 /* This gets rid of the newly added bblocks */
4759 cfg->cbb = prev_cbb;
4761 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4766 * Some of these comments may well be out-of-date.
4767 * Design decisions: we do a single pass over the IL code (and we do bblock
4768 * splitting/merging in the few cases when it's required: a back jump to an IL
4769 * address that was not already seen as bblock starting point).
4770 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4771 * Complex operations are decomposed in simpler ones right away. We need to let the
4772 * arch-specific code peek and poke inside this process somehow (except when the
4773 * optimizations can take advantage of the full semantic info of coarse opcodes).
4774 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4775 * MonoInst->opcode initially is the IL opcode or some simplification of that
4776 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4777 * opcode with value bigger than OP_LAST.
4778 * At this point the IR can be handed over to an interpreter, a dumb code generator
4779 * or to the optimizing code generator that will translate it to SSA form.
4781 * Profiling directed optimizations.
4782 * We may compile by default with few or no optimizations and instrument the code
4783 * or the user may indicate what methods to optimize the most either in a config file
4784 * or through repeated runs where the compiler applies offline the optimizations to
4785 * each method and then decides if it was worth it.
/*
 * IL-verification helper macros used throughout mono_method_to_ir ().
 * Each one bails out through the UNVERIFIED machinery (or load_error for
 * CHECK_TYPELOAD) when the IL being compiled is malformed.
 */
4788 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4789 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4790 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4791 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4792 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4793 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4794 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4795 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4797 /* offset from br.s -> br like opcodes */
4798 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the IL address IP can be treated as part of bblock BB,
 *   i.e. no *other* bblock starts at IP (NULL means no bblock starts there).
 */
4801 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4803 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4805 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode and create a
 *   bblock (via GET_BBLOCK) at every branch target and fall-through point,
 *   so the main conversion loop knows where block boundaries are.
 *   NOTE(review): lines elided in this view; only comments added.
 */
4809 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4811 unsigned char *ip = start;
4812 unsigned char *target;
4815 MonoBasicBlock *bblock;
4816 const MonoOpcode *opcode;
4819 cli_addr = ip - start;
4820 i = mono_opcode_value ((const guint8 **)&ip, end);
4823 opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to know how far to advance and where
 * branch targets (if any) live. */
4824 switch (opcode->argument) {
4825 case MonoInlineNone:
4828 case MonoInlineString:
4829 case MonoInlineType:
4830 case MonoInlineField:
4831 case MonoInlineMethod:
4834 case MonoShortInlineR:
4841 case MonoShortInlineVar:
4842 case MonoShortInlineI:
/* Short branch: 8-bit signed displacement after a 2-byte instruction. */
4845 case MonoShortInlineBrTarget:
4846 target = start + cli_addr + 2 + (signed char)ip [1];
4847 GET_BBLOCK (cfg, bblock, target);
4850 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 32-bit displacement after a 5-byte instruction. */
4852 case MonoInlineBrTarget:
4853 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4854 GET_BBLOCK (cfg, bblock, target);
4857 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 32-bit targets, all relative to the end of the instruction. */
4859 case MonoInlineSwitch: {
4860 guint32 n = read32 (ip + 1);
4863 cli_addr += 5 + 4 * n;
4864 target = start + cli_addr;
4865 GET_BBLOCK (cfg, bblock, target);
4867 for (j = 0; j < n; ++j) {
4868 target = start + cli_addr + (gint32)read32 (ip);
4869 GET_BBLOCK (cfg, bblock, target);
4879 g_assert_not_reached ();
/* Mark blocks ending in throw as out-of-line so they can be laid out cold. */
4882 if (i == CEE_THROW) {
4883 unsigned char *bb_start = ip - 1;
4885 /* Find the start of the bblock containing the throw */
4887 while ((bb_start >= start) && !bblock) {
4888 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4892 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's own data; otherwise it is a
 *   normal metadata token resolved against M's image.  "Open" generic
 *   instantiations are allowed (contrast with mini_get_method () below).
 */
4901 static inline MonoMethod *
4902 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4906 if (m->wrapper_type != MONO_WRAPPER_NONE)
4907 return mono_method_get_wrapper_data (m, token);
4909 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared code
 *   a method on an open constructed type is rejected (elided branch
 *   presumably returns NULL — TODO confirm against full source).
 */
4914 static inline MonoMethod *
4915 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4917 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4919 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's context: wrapper data for
 *   wrappers, otherwise a metadata lookup against the method's image.
 *   The resolved class is initialized before use.
 */
4925 static inline MonoClass*
4926 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4930 if (method->wrapper_type != MONO_WRAPPER_NONE)
4931 klass = mono_method_get_wrapper_data (method, token);
4933 klass = mono_class_get_full (method->klass->image, token, context);
4935 mono_class_init (klass);
4940 * Returns TRUE if the JIT should abort inlining because "callee"
4941 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a CALLER -> CALLEE call.  An ECMA-level
 *   failure emits code that throws a SecurityException at the call site;
 *   other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 *   (without clobbering an earlier recorded exception).
 */
4944 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* When inlining (cfg->method != caller), declarative security on the
 * callee is handled here rather than by the normal call path. */
4948 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4952 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4953 if (result == MONO_JIT_SECURITY_OK)
4956 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4957 /* Generate code to throw a SecurityException before the actual call/link */
4958 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4961 NEW_ICONST (cfg, args [0], 4);
4962 NEW_METHODCONST (cfg, args [1], caller);
4963 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4964 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4965 /* don't hide previous results */
4966 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4967 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (and lazily cache) the managed SecurityManager.ThrowException
 *   method used to raise security exceptions from JITted code.
 */
4975 throw_exception (void)
4977 static MonoMethod *method = NULL;
4980 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4981 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-built exception object EX at runtime by
 *   calling SecurityManager.ThrowException with it.
 */
4988 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4990 MonoMethod *thrower = throw_exception ();
4993 EMIT_NEW_PCONST (cfg, args [0], ex);
4994 mono_emit_method_call (cfg, thrower, args, NULL);
4998 * Return the original method if a wrapper is specified. We can only access
4999 * the custom attributes from the original method.
5002 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method. */
5004 if (method->wrapper_type == MONO_WRAPPER_NONE)
5007 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5008 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5011 /* in other cases we need to find the original method */
5012 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for a field access; when the access is denied an
 *   exception-throwing sequence is emitted in place of proceeding normally.
 */
5016 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5017 MonoBasicBlock *bblock, unsigned char *ip)
5019 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5020 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5022 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for a method call; mirrors the field-access
 *   variant above but validates CALLER -> CALLEE instead.
 */
5026 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5027 MonoBasicBlock *bblock, unsigned char *ip)
5029 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5030 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5032 emit_throw_exception (cfg, ex);
5036 * Check that the IL instructions at ip are the array initialization
5037 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers::
 *   InitializeArray" idiom emitted by compilers for array literals, and if
 *   it matches return a pointer to the static initializer blob (or, for AOT,
 *   the RVA as a tagged pointer) so the call can be replaced by a memcpy.
 *   NOTE(review): lines elided in this view; only comments added.
 */
5040 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5043 * newarr[System.Int32]
5045 * ldtoken field valuetype ...
5046 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks that the ldtoken token is a FieldDef (table 0x04). */
5048 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5049 guint32 token = read32 (ip + 7);
5050 guint32 field_token = read32 (ip + 2);
5051 guint32 field_index = field_token & 0xffffff;
5053 const char *data_ptr;
5055 MonoMethod *cmethod;
5056 MonoClass *dummy_class;
5057 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5063 *out_field_token = field_token;
5065 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
5068 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Determine the element size; multi-byte types are only safe to copy
 * verbatim on little-endian targets. */
5070 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5071 case MONO_TYPE_BOOLEAN:
5075 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5076 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5077 case MONO_TYPE_CHAR:
5087 return NULL; /* stupid ARM FP swapped format */
/* Sanity check: initializer blob must fit in the declared field. */
5097 if (size > mono_type_size (field->type, &dummy_align))
5100 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5101 if (!method->klass->image->dynamic) {
5102 field_index = read32 (ip + 2) & 0xffffff;
5103 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5104 data_ptr = mono_image_rva_map (method->klass->image, rva);
5105 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5106 /* for aot code we do the lookup on load */
5107 if (aot && data_ptr)
5108 return GUINT_TO_POINTER (rva);
5110 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images keep the data reachable through the field itself. */
5112 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg, including a disassembly of
 *   the offending instruction at IP (or a note that the body is empty).
 */
5120 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5122 char *method_fname = mono_method_full_name (method, TRUE);
5124 MonoMethodHeader *header = mono_method_get_header (method);
5126 if (header->code_size == 0)
5127 method_code = g_strdup ("method body is empty.");
5129 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5130 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5131 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5132 g_free (method_fname);
5133 g_free (method_code);
/* The header is owned by the compile from here on; freed with the cfg. */
5134 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on the cfg; the pointer is
 *   registered as a GC root since it lives in unmanaged cfg state.
 */
5138 set_exception_object (MonoCompile *cfg, MonoException *exception)
5140 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5141 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5142 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Whether KLASS is a reference type, resolving type variables through the
 *   generic-sharing context first when compiling shared code.
 */
5146 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5150 if (cfg->generic_sharing_context)
5151 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5153 type = &klass->byval_arg;
5154 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of *SP into local N.  When the value on top of the stack is
 *   a constant that was just emitted, redirect its dreg to the local instead
 *   of emitting a separate register move.
 */
5158 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5161 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5162 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5163 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5164 /* Optimize reg-reg moves away */
5166 * Can't optimize other opcodes, since sp[0] might point to
5167 * the last ins of a decomposed opcode.
5169 sp [0]->dreg = (cfg)->locals [n]->dreg;
5171 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5176 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the local's
 *   address, directly zero the local (NULL-store for reference types, VZERO
 *   for value types), which keeps the local addressable-free.  Returns value
 *   elided in this view — presumably the new ip on success, NULL otherwise;
 *   TODO confirm against full source.
 */
5179 static inline unsigned char *
5180 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5189 local = read16 (ip + 2);
/* Only fire when the following initobj is in the same bblock. */
5193 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5194 gboolean skip = FALSE;
5196 /* From the INITOBJ case */
5197 token = read32 (ip + 2);
5198 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5199 CHECK_TYPELOAD (klass);
5200 if (generic_class_is_reference_type (cfg, klass)) {
5201 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5202 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5203 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5204 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5205 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk the inheritance chain of CLASS looking for System.Exception.
 */
5218 is_exception_class (MonoClass *class)
5221 if (class == mono_defaults.exception_class)
5223 class = class->parent;
5229 * mono_method_to_ir:
5231 * Translate the .net IL into linear IR.
5234 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5235 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5236 guint inline_offset, gboolean is_virtual_call)
5239 MonoInst *ins, **sp, **stack_start;
5240 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5241 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5242 MonoMethod *cmethod, *method_definition;
5243 MonoInst **arg_array;
5244 MonoMethodHeader *header;
5246 guint32 token, ins_flag;
5248 MonoClass *constrained_call = NULL;
5249 unsigned char *ip, *end, *target, *err_pos;
5250 static double r8_0 = 0.0;
5251 MonoMethodSignature *sig;
5252 MonoGenericContext *generic_context = NULL;
5253 MonoGenericContainer *generic_container = NULL;
5254 MonoType **param_types;
5255 int i, n, start_new_bblock, dreg;
5256 int num_calls = 0, inline_costs = 0;
5257 int breakpoint_id = 0;
5259 MonoBoolean security, pinvoke;
5260 MonoSecurityManager* secman = NULL;
5261 MonoDeclSecurityActions actions;
5262 GSList *class_inits = NULL;
5263 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5265 gboolean init_locals, seq_points, skip_dead_blocks;
5267 /* serialization and xdomain stuff may need access to private fields and methods */
5268 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5269 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5270 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5271 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5272 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5273 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5275 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5277 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5278 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5279 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5280 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5282 image = method->klass->image;
5283 header = mono_method_get_header (method);
5285 MonoLoaderError *error;
5287 if ((error = mono_loader_get_last_error ())) {
5288 cfg->exception_type = error->exception_type;
5290 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5291 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5293 goto exception_exit;
5295 generic_container = mono_method_get_generic_container (method);
5296 sig = mono_method_signature (method);
5297 num_args = sig->hasthis + sig->param_count;
5298 ip = (unsigned char*)header->code;
5299 cfg->cil_start = ip;
5300 end = ip + header->code_size;
5301 mono_jit_stats.cil_code_size += header->code_size;
5302 init_locals = header->init_locals;
5304 seq_points = cfg->gen_seq_points && cfg->method == method;
5307 * Methods without init_locals set could cause asserts in various passes
5312 method_definition = method;
5313 while (method_definition->is_inflated) {
5314 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5315 method_definition = imethod->declaring;
5318 /* SkipVerification is not allowed if core-clr is enabled */
5319 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5321 dont_verify_stloc = TRUE;
5324 if (!dont_verify && mini_method_verify (cfg, method_definition))
5325 goto exception_exit;
5327 if (mono_debug_using_mono_debugger ())
5328 cfg->keep_cil_nops = TRUE;
5330 if (sig->is_inflated)
5331 generic_context = mono_method_get_context (method);
5332 else if (generic_container)
5333 generic_context = &generic_container->context;
5334 cfg->generic_context = generic_context;
5336 if (!cfg->generic_sharing_context)
5337 g_assert (!sig->has_type_parameters);
5339 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5340 g_assert (method->is_inflated);
5341 g_assert (mono_method_get_context (method)->method_inst);
5343 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5344 g_assert (sig->generic_param_count);
5346 if (cfg->method == method) {
5347 cfg->real_offset = 0;
5349 cfg->real_offset = inline_offset;
5352 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5353 cfg->cil_offset_to_bb_len = header->code_size;
5355 cfg->current_method = method;
5357 if (cfg->verbose_level > 2)
5358 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5360 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5362 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5363 for (n = 0; n < sig->param_count; ++n)
5364 param_types [n + sig->hasthis] = sig->params [n];
5365 cfg->arg_types = param_types;
5367 dont_inline = g_list_prepend (dont_inline, method);
5368 if (cfg->method == method) {
5370 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5371 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5374 NEW_BBLOCK (cfg, start_bblock);
5375 cfg->bb_entry = start_bblock;
5376 start_bblock->cil_code = NULL;
5377 start_bblock->cil_length = 0;
5380 NEW_BBLOCK (cfg, end_bblock);
5381 cfg->bb_exit = end_bblock;
5382 end_bblock->cil_code = NULL;
5383 end_bblock->cil_length = 0;
5384 g_assert (cfg->num_bblocks == 2);
5386 arg_array = cfg->args;
5388 if (header->num_clauses) {
5389 cfg->spvars = g_hash_table_new (NULL, NULL);
5390 cfg->exvars = g_hash_table_new (NULL, NULL);
5392 /* handle exception clauses */
5393 for (i = 0; i < header->num_clauses; ++i) {
5394 MonoBasicBlock *try_bb;
5395 MonoExceptionClause *clause = &header->clauses [i];
5396 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5397 try_bb->real_offset = clause->try_offset;
5398 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5399 tblock->real_offset = clause->handler_offset;
5400 tblock->flags |= BB_EXCEPTION_HANDLER;
5402 link_bblock (cfg, try_bb, tblock);
5404 if (*(ip + clause->handler_offset) == CEE_POP)
5405 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5407 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5408 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5409 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5410 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5411 MONO_ADD_INS (tblock, ins);
5413 /* todo: is a fault block unsafe to optimize? */
5414 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5415 tblock->flags |= BB_EXCEPTION_UNSAFE;
5419 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5421 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5423 /* catch and filter blocks get the exception object on the stack */
5424 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5425 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5426 MonoInst *dummy_use;
5428 /* mostly like handle_stack_args (), but just sets the input args */
5429 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5430 tblock->in_scount = 1;
5431 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5432 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5435 * Add a dummy use for the exvar so its liveness info will be
5439 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5441 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5442 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5443 tblock->flags |= BB_EXCEPTION_HANDLER;
5444 tblock->real_offset = clause->data.filter_offset;
5445 tblock->in_scount = 1;
5446 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5447 /* The filter block shares the exvar with the handler block */
5448 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5449 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5450 MONO_ADD_INS (tblock, ins);
5454 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5455 clause->data.catch_class &&
5456 cfg->generic_sharing_context &&
5457 mono_class_check_context_used (clause->data.catch_class)) {
5459 * In shared generic code with catch
5460 * clauses containing type variables
5461 * the exception handling code has to
5462 * be able to get to the rgctx.
5463 * Therefore we have to make sure that
5464 * the vtable/mrgctx argument (for
5465 * static or generic methods) or the
5466 * "this" argument (for non-static
5467 * methods) are live.
5469 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5470 mini_method_get_context (method)->method_inst ||
5471 method->klass->valuetype) {
5472 mono_get_vtable_var (cfg);
5474 MonoInst *dummy_use;
5476 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5481 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5482 cfg->cbb = start_bblock;
5483 cfg->args = arg_array;
5484 mono_save_args (cfg, sig, inline_args);
5487 /* FIRST CODE BLOCK */
5488 NEW_BBLOCK (cfg, bblock);
5489 bblock->cil_code = ip;
5493 ADD_BBLOCK (cfg, bblock);
5495 if (cfg->method == method) {
5496 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5497 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5498 MONO_INST_NEW (cfg, ins, OP_BREAK);
5499 MONO_ADD_INS (bblock, ins);
5503 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5504 secman = mono_security_manager_get_methods ();
5506 security = (secman && mono_method_has_declsec (method));
5507 /* at this point having security doesn't mean we have any code to generate */
5508 if (security && (cfg->method == method)) {
5509 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5510 * And we do not want to enter the next section (with allocation) if we
5511 * have nothing to generate */
5512 security = mono_declsec_get_demands (method, &actions);
5515 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5516 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5518 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5519 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5520 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5522 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5523 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5527 mono_custom_attrs_free (custom);
5530 custom = mono_custom_attrs_from_class (wrapped->klass);
5531 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5535 mono_custom_attrs_free (custom);
5538 /* not a P/Invoke after all */
5543 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5544 /* we use a separate basic block for the initialization code */
5545 NEW_BBLOCK (cfg, init_localsbb);
5546 cfg->bb_init = init_localsbb;
5547 init_localsbb->real_offset = cfg->real_offset;
5548 start_bblock->next_bb = init_localsbb;
5549 init_localsbb->next_bb = bblock;
5550 link_bblock (cfg, start_bblock, init_localsbb);
5551 link_bblock (cfg, init_localsbb, bblock);
5553 cfg->cbb = init_localsbb;
5555 start_bblock->next_bb = bblock;
5556 link_bblock (cfg, start_bblock, bblock);
5559 /* at this point we know, if security is TRUE, that some code needs to be generated */
5560 if (security && (cfg->method == method)) {
5563 mono_jit_stats.cas_demand_generation++;
5565 if (actions.demand.blob) {
5566 /* Add code for SecurityAction.Demand */
5567 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5568 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5569 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5570 mono_emit_method_call (cfg, secman->demand, args, NULL);
5572 if (actions.noncasdemand.blob) {
5573 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5574 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5575 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5576 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5577 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5578 mono_emit_method_call (cfg, secman->demand, args, NULL);
5580 if (actions.demandchoice.blob) {
5581 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5582 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5583 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5584 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5585 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5589 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5591 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5594 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5595 /* check if this is native code, e.g. an icall or a p/invoke */
5596 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5597 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5599 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5600 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5602 /* if this is a native call then it can only be JITted from platform code */
5603 if ((icall || pinvk) && method->klass && method->klass->image) {
5604 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5605 MonoException *ex = icall ? mono_get_exception_security () :
5606 mono_get_exception_method_access ();
5607 emit_throw_exception (cfg, ex);
5614 if (header->code_size == 0)
5617 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5622 if (cfg->method == method)
5623 mono_debug_init_method (cfg, bblock, breakpoint_id);
5625 for (n = 0; n < header->num_locals; ++n) {
5626 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5631 /* We force the vtable variable here for all shared methods
5632 for the possibility that they might show up in a stack
5633 trace where their exact instantiation is needed. */
5634 if (cfg->generic_sharing_context && method == cfg->method) {
5635 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5636 mini_method_get_context (method)->method_inst ||
5637 method->klass->valuetype) {
5638 mono_get_vtable_var (cfg);
5640 /* FIXME: Is there a better way to do this?
5641 We need the variable live for the duration
5642 of the whole method. */
5643 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5647 /* add a check for this != NULL to inlined methods */
5648 if (is_virtual_call) {
5651 NEW_ARGLOAD (cfg, arg_ins, 0);
5652 MONO_ADD_INS (cfg->cbb, arg_ins);
5653 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5656 skip_dead_blocks = !dont_verify;
5657 if (skip_dead_blocks) {
5658 original_bb = bb = mono_basic_block_split (method, &error);
5659 if (!mono_error_ok (&error)) {
5660 mono_error_cleanup (&error);
5666 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5667 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5670 start_new_bblock = 0;
5673 if (cfg->method == method)
5674 cfg->real_offset = ip - header->code;
5676 cfg->real_offset = inline_offset;
5681 if (start_new_bblock) {
5682 bblock->cil_length = ip - bblock->cil_code;
5683 if (start_new_bblock == 2) {
5684 g_assert (ip == tblock->cil_code);
5686 GET_BBLOCK (cfg, tblock, ip);
5688 bblock->next_bb = tblock;
5691 start_new_bblock = 0;
5692 for (i = 0; i < bblock->in_scount; ++i) {
5693 if (cfg->verbose_level > 3)
5694 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5695 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5699 g_slist_free (class_inits);
5702 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5703 link_bblock (cfg, bblock, tblock);
5704 if (sp != stack_start) {
5705 handle_stack_args (cfg, stack_start, sp - stack_start);
5707 CHECK_UNVERIFIABLE (cfg);
5709 bblock->next_bb = tblock;
5712 for (i = 0; i < bblock->in_scount; ++i) {
5713 if (cfg->verbose_level > 3)
5714 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5715 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5718 g_slist_free (class_inits);
5723 if (skip_dead_blocks) {
5724 int ip_offset = ip - header->code;
5726 if (ip_offset == bb->end)
5730 int op_size = mono_opcode_size (ip, end);
5731 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5733 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5735 if (ip_offset + op_size == bb->end) {
5736 MONO_INST_NEW (cfg, ins, OP_NOP);
5737 MONO_ADD_INS (bblock, ins);
5738 start_new_bblock = 1;
5746 * Sequence points are points where the debugger can place a breakpoint.
5747 * Currently, we generate these automatically at points where the IL
5750 if (seq_points && sp == stack_start) {
5751 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5752 MONO_ADD_INS (cfg->cbb, ins);
5755 bblock->real_offset = cfg->real_offset;
5757 if ((cfg->method == method) && cfg->coverage_info) {
5758 guint32 cil_offset = ip - header->code;
5759 cfg->coverage_info->data [cil_offset].cil_code = ip;
5761 /* TODO: Use an increment here */
5762 #if defined(TARGET_X86)
5763 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5764 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5766 MONO_ADD_INS (cfg->cbb, ins);
5768 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5769 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5773 if (cfg->verbose_level > 3)
5774 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5778 if (cfg->keep_cil_nops)
5779 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5781 MONO_INST_NEW (cfg, ins, OP_NOP);
5783 MONO_ADD_INS (bblock, ins);
5786 if (should_insert_brekpoint (cfg->method))
5787 MONO_INST_NEW (cfg, ins, OP_BREAK);
5789 MONO_INST_NEW (cfg, ins, OP_NOP);
5791 MONO_ADD_INS (bblock, ins);
5797 CHECK_STACK_OVF (1);
5798 n = (*ip)-CEE_LDARG_0;
5800 EMIT_NEW_ARGLOAD (cfg, ins, n);
5808 CHECK_STACK_OVF (1);
5809 n = (*ip)-CEE_LDLOC_0;
5811 EMIT_NEW_LOCLOAD (cfg, ins, n);
5820 n = (*ip)-CEE_STLOC_0;
5823 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5825 emit_stloc_ir (cfg, sp, header, n);
5832 CHECK_STACK_OVF (1);
5835 EMIT_NEW_ARGLOAD (cfg, ins, n);
5841 CHECK_STACK_OVF (1);
5844 NEW_ARGLOADA (cfg, ins, n);
5845 MONO_ADD_INS (cfg->cbb, ins);
5855 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5857 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5862 CHECK_STACK_OVF (1);
5865 EMIT_NEW_LOCLOAD (cfg, ins, n);
5869 case CEE_LDLOCA_S: {
5870 unsigned char *tmp_ip;
5872 CHECK_STACK_OVF (1);
5873 CHECK_LOCAL (ip [1]);
5875 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5881 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5890 CHECK_LOCAL (ip [1]);
5891 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5893 emit_stloc_ir (cfg, sp, header, ip [1]);
5898 CHECK_STACK_OVF (1);
5899 EMIT_NEW_PCONST (cfg, ins, NULL);
5900 ins->type = STACK_OBJ;
5905 CHECK_STACK_OVF (1);
5906 EMIT_NEW_ICONST (cfg, ins, -1);
5919 CHECK_STACK_OVF (1);
5920 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5926 CHECK_STACK_OVF (1);
5928 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5934 CHECK_STACK_OVF (1);
5935 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5941 CHECK_STACK_OVF (1);
5942 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5943 ins->type = STACK_I8;
5944 ins->dreg = alloc_dreg (cfg, STACK_I8);
5946 ins->inst_l = (gint64)read64 (ip);
5947 MONO_ADD_INS (bblock, ins);
5953 gboolean use_aotconst = FALSE;
5955 #ifdef TARGET_POWERPC
5956 /* FIXME: Clean this up */
5957 if (cfg->compile_aot)
5958 use_aotconst = TRUE;
5961 /* FIXME: we should really allocate this only late in the compilation process */
5962 f = mono_domain_alloc (cfg->domain, sizeof (float));
5964 CHECK_STACK_OVF (1);
5970 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5972 dreg = alloc_freg (cfg);
5973 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5974 ins->type = STACK_R8;
5976 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5977 ins->type = STACK_R8;
5978 ins->dreg = alloc_dreg (cfg, STACK_R8);
5980 MONO_ADD_INS (bblock, ins);
5990 gboolean use_aotconst = FALSE;
5992 #ifdef TARGET_POWERPC
5993 /* FIXME: Clean this up */
5994 if (cfg->compile_aot)
5995 use_aotconst = TRUE;
5998 /* FIXME: we should really allocate this only late in the compilation process */
5999 d = mono_domain_alloc (cfg->domain, sizeof (double));
6001 CHECK_STACK_OVF (1);
6007 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6009 dreg = alloc_freg (cfg);
6010 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6011 ins->type = STACK_R8;
6013 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6014 ins->type = STACK_R8;
6015 ins->dreg = alloc_dreg (cfg, STACK_R8);
6017 MONO_ADD_INS (bblock, ins);
6026 MonoInst *temp, *store;
6028 CHECK_STACK_OVF (1);
6032 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6033 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6035 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6038 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6051 if (sp [0]->type == STACK_R8)
6052 /* we need to pop the value from the x86 FP stack */
6053 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6062 if (stack_start != sp)
6064 token = read32 (ip + 1);
6065 /* FIXME: check the signature matches */
6066 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6071 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6072 GENERIC_SHARING_FAILURE (CEE_JMP);
6074 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6075 CHECK_CFG_EXCEPTION;
6077 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6079 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6082 /* Handle tail calls similarly to calls */
6083 n = fsig->param_count + fsig->hasthis;
6085 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6086 call->method = cmethod;
6087 call->tail_call = TRUE;
6088 call->signature = mono_method_signature (cmethod);
6089 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6090 call->inst.inst_p0 = cmethod;
6091 for (i = 0; i < n; ++i)
6092 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6094 mono_arch_emit_call (cfg, call);
6095 MONO_ADD_INS (bblock, (MonoInst*)call);
6098 for (i = 0; i < num_args; ++i)
6099 /* Prevent arguments from being optimized away */
6100 arg_array [i]->flags |= MONO_INST_VOLATILE;
6102 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6103 ins = (MonoInst*)call;
6104 ins->inst_p0 = cmethod;
6105 MONO_ADD_INS (bblock, ins);
6109 start_new_bblock = 1;
6114 case CEE_CALLVIRT: {
6115 MonoInst *addr = NULL;
6116 MonoMethodSignature *fsig = NULL;
6118 int virtual = *ip == CEE_CALLVIRT;
6119 int calli = *ip == CEE_CALLI;
6120 gboolean pass_imt_from_rgctx = FALSE;
6121 MonoInst *imt_arg = NULL;
6122 gboolean pass_vtable = FALSE;
6123 gboolean pass_mrgctx = FALSE;
6124 MonoInst *vtable_arg = NULL;
6125 gboolean check_this = FALSE;
6126 gboolean supported_tail_call = FALSE;
6129 token = read32 (ip + 1);
6136 if (method->wrapper_type != MONO_WRAPPER_NONE)
6137 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6139 fsig = mono_metadata_parse_signature (image, token);
6141 n = fsig->param_count + fsig->hasthis;
6143 if (method->dynamic && fsig->pinvoke) {
6147 * This is a call through a function pointer using a pinvoke
6148 * signature. Have to create a wrapper and call that instead.
6149 * FIXME: This is very slow, need to create a wrapper at JIT time
6150 * instead based on the signature.
6152 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6153 EMIT_NEW_PCONST (cfg, args [1], fsig);
6155 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6158 MonoMethod *cil_method;
6160 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6161 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6162 cil_method = cmethod;
6163 } else if (constrained_call) {
6164 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6166 * This is needed since get_method_constrained can't find
6167 * the method in klass representing a type var.
6168 * The type var is guaranteed to be a reference type in this
6171 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6172 cil_method = cmethod;
6173 g_assert (!cmethod->klass->valuetype);
6175 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6178 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6179 cil_method = cmethod;
6184 if (!dont_verify && !cfg->skip_visibility) {
6185 MonoMethod *target_method = cil_method;
6186 if (method->is_inflated) {
6187 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6189 if (!mono_method_can_access_method (method_definition, target_method) &&
6190 !mono_method_can_access_method (method, cil_method))
6191 METHOD_ACCESS_FAILURE;
6194 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6195 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6197 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6198 /* MS.NET seems to silently convert this to a callvirt */
6203 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6204 * converts to a callvirt.
6206 * tests/bug-515884.il is an example of this behavior
6208 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6209 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6210 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6214 if (!cmethod->klass->inited)
6215 if (!mono_class_init (cmethod->klass))
6218 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6219 mini_class_is_system_array (cmethod->klass)) {
6220 array_rank = cmethod->klass->rank;
6221 fsig = mono_method_signature (cmethod);
6223 fsig = mono_method_signature (cmethod);
6228 if (fsig->pinvoke) {
6229 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6230 check_for_pending_exc, FALSE);
6231 fsig = mono_method_signature (wrapper);
6232 } else if (constrained_call) {
6233 fsig = mono_method_signature (cmethod);
6235 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6239 mono_save_token_info (cfg, image, token, cil_method);
6241 n = fsig->param_count + fsig->hasthis;
6243 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6244 if (check_linkdemand (cfg, method, cmethod))
6246 CHECK_CFG_EXCEPTION;
6249 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6250 g_assert_not_reached ();
6253 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6256 if (!cfg->generic_sharing_context && cmethod)
6257 g_assert (!mono_method_check_context_used (cmethod));
6261 //g_assert (!virtual || fsig->hasthis);
6265 if (constrained_call) {
6267 * We have the `constrained.' prefix opcode.
6269 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6271 * The type parameter is instantiated as a valuetype,
6272 * but that type doesn't override the method we're
6273 * calling, so we need to box `this'.
6275 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6276 ins->klass = constrained_call;
6277 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6278 CHECK_CFG_EXCEPTION;
6279 } else if (!constrained_call->valuetype) {
6280 int dreg = alloc_preg (cfg);
6283 * The type parameter is instantiated as a reference
6284 * type. We have a managed pointer on the stack, so
6285 * we need to dereference it here.
6287 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6288 ins->type = STACK_OBJ;
6290 } else if (cmethod->klass->valuetype)
6292 constrained_call = NULL;
6295 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6299 * If the callee is a shared method, then its static cctor
6300 * might not get called after the call was patched.
6302 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6303 emit_generic_class_init (cfg, cmethod->klass);
6304 CHECK_TYPELOAD (cmethod->klass);
6307 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6308 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6309 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6310 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6311 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6314 * Pass vtable iff target method might
6315 * be shared, which means that sharing
6316 * is enabled for its class and its
6317 * context is sharable (and it's not a
6320 if (sharing_enabled && context_sharable &&
6321 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6325 if (cmethod && mini_method_get_context (cmethod) &&
6326 mini_method_get_context (cmethod)->method_inst) {
6327 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6328 MonoGenericContext *context = mini_method_get_context (cmethod);
6329 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6331 g_assert (!pass_vtable);
6333 if (sharing_enabled && context_sharable)
6337 if (cfg->generic_sharing_context && cmethod) {
6338 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6340 context_used = mono_method_check_context_used (cmethod);
6342 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6343 /* Generic method interface
6344 calls are resolved via a
6345 helper function and don't
6347 if (!cmethod_context || !cmethod_context->method_inst)
6348 pass_imt_from_rgctx = TRUE;
6352 * If a shared method calls another
6353 * shared method then the caller must
6354 * have a generic sharing context
6355 * because the magic trampoline
6356 * requires it. FIXME: We shouldn't
6357 * have to force the vtable/mrgctx
6358 * variable here. Instead there
6359 * should be a flag in the cfg to
6360 * request a generic sharing context.
6363 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6364 mono_get_vtable_var (cfg);
6369 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6371 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6373 CHECK_TYPELOAD (cmethod->klass);
6374 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6379 g_assert (!vtable_arg);
6381 if (!cfg->compile_aot) {
6383 * emit_get_rgctx_method () calls mono_class_vtable () so check
6384 * for type load errors before.
6386 mono_class_setup_vtable (cmethod->klass);
6387 CHECK_TYPELOAD (cmethod->klass);
6390 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6392 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6393 MONO_METHOD_IS_FINAL (cmethod)) {
6400 if (pass_imt_from_rgctx) {
6401 g_assert (!pass_vtable);
6404 imt_arg = emit_get_rgctx_method (cfg, context_used,
6405 cmethod, MONO_RGCTX_INFO_METHOD);
6409 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6411 /* Calling virtual generic methods */
6412 if (cmethod && virtual &&
6413 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6414 !(MONO_METHOD_IS_FINAL (cmethod) &&
6415 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6416 mono_method_signature (cmethod)->generic_param_count) {
6417 MonoInst *this_temp, *this_arg_temp, *store;
6418 MonoInst *iargs [4];
6420 g_assert (mono_method_signature (cmethod)->is_inflated);
6422 /* Prevent inlining of methods that contain indirect calls */
6425 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6426 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6427 g_assert (!imt_arg);
6429 g_assert (cmethod->is_inflated);
6430 imt_arg = emit_get_rgctx_method (cfg, context_used,
6431 cmethod, MONO_RGCTX_INFO_METHOD);
6432 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6436 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6437 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6438 MONO_ADD_INS (bblock, store);
6440 /* FIXME: This should be a managed pointer */
6441 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6443 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6444 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6445 cmethod, MONO_RGCTX_INFO_METHOD);
6446 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6447 addr = mono_emit_jit_icall (cfg,
6448 mono_helper_compile_generic_method, iargs);
6450 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6452 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6455 if (!MONO_TYPE_IS_VOID (fsig->ret))
6456 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6458 CHECK_CFG_EXCEPTION;
6465 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6466 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6468 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6472 /* FIXME: runtime generic context pointer for jumps? */
6473 /* FIXME: handle this for generic sharing eventually */
6474 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6477 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6480 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6481 /* Handle tail calls similarly to calls */
6482 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6484 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6485 call->tail_call = TRUE;
6486 call->method = cmethod;
6487 call->signature = mono_method_signature (cmethod);
6490 * We implement tail calls by storing the actual arguments into the
6491 * argument variables, then emitting a CEE_JMP.
6493 for (i = 0; i < n; ++i) {
6494 /* Prevent argument from being register allocated */
6495 arg_array [i]->flags |= MONO_INST_VOLATILE;
6496 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6500 ins = (MonoInst*)call;
6501 ins->inst_p0 = cmethod;
6502 ins->inst_p1 = arg_array [0];
6503 MONO_ADD_INS (bblock, ins);
6504 link_bblock (cfg, bblock, end_bblock);
6505 start_new_bblock = 1;
6507 CHECK_CFG_EXCEPTION;
6509 /* skip CEE_RET as well */
6515 /* Conversion to a JIT intrinsic */
6516 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6517 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6518 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6523 CHECK_CFG_EXCEPTION;
6531 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6532 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6533 mono_method_check_inlining (cfg, cmethod) &&
6534 !g_list_find (dont_inline, cmethod)) {
6536 gboolean allways = FALSE;
6538 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6539 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6540 /* Prevent inlining of methods that call wrappers */
6542 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6546 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6548 cfg->real_offset += 5;
6551 if (!MONO_TYPE_IS_VOID (fsig->ret))
6552 /* *sp is already set by inline_method */
6555 inline_costs += costs;
6561 inline_costs += 10 * num_calls++;
6563 /* Tail recursion elimination */
6564 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6565 gboolean has_vtargs = FALSE;
6568 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6571 /* keep it simple */
6572 for (i = fsig->param_count - 1; i >= 0; i--) {
6573 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6578 for (i = 0; i < n; ++i)
6579 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6580 MONO_INST_NEW (cfg, ins, OP_BR);
6581 MONO_ADD_INS (bblock, ins);
6582 tblock = start_bblock->out_bb [0];
6583 link_bblock (cfg, bblock, tblock);
6584 ins->inst_target_bb = tblock;
6585 start_new_bblock = 1;
6587 /* skip the CEE_RET, too */
6588 if (ip_in_bb (cfg, bblock, ip + 5))
6598 /* Generic sharing */
6599 /* FIXME: only do this for generic methods if
6600 they are not shared! */
6601 if (context_used && !imt_arg && !array_rank &&
6602 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6603 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6604 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6605 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6608 g_assert (cfg->generic_sharing_context && cmethod);
6612 * We are compiling a call to a
6613 * generic method from shared code,
6614 * which means that we have to look up
6615 * the method in the rgctx and do an
6618 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6621 /* Indirect calls */
6623 g_assert (!imt_arg);
6625 if (*ip == CEE_CALL)
6626 g_assert (context_used);
6627 else if (*ip == CEE_CALLI)
6628 g_assert (!vtable_arg);
6630 /* FIXME: what the hell is this??? */
6631 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6632 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6634 /* Prevent inlining of methods with indirect calls */
6639 int rgctx_reg = mono_alloc_preg (cfg);
6641 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6642 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6643 call = (MonoCallInst*)ins;
6644 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6646 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6648 * Instead of emitting an indirect call, emit a direct call
6649 * with the contents of the aotconst as the patch info.
6651 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6653 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6654 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6657 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6660 if (!MONO_TYPE_IS_VOID (fsig->ret))
6661 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6663 CHECK_CFG_EXCEPTION;
6674 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6675 if (sp [fsig->param_count]->type == STACK_OBJ) {
6676 MonoInst *iargs [2];
6679 iargs [1] = sp [fsig->param_count];
6681 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6684 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6685 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6686 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6687 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6689 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6692 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6693 if (!cmethod->klass->element_class->valuetype && !readonly)
6694 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6695 CHECK_TYPELOAD (cmethod->klass);
6698 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6701 g_assert_not_reached ();
6704 CHECK_CFG_EXCEPTION;
6711 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6713 if (!MONO_TYPE_IS_VOID (fsig->ret))
6714 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6716 CHECK_CFG_EXCEPTION;
6726 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6728 } else if (imt_arg) {
6729 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6731 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6734 if (!MONO_TYPE_IS_VOID (fsig->ret))
6735 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6737 CHECK_CFG_EXCEPTION;
6744 if (cfg->method != method) {
6745 /* return from inlined method */
6747 * If in_count == 0, that means the ret is unreachable due to
6748 * being preceded by a throw. In that case, inline_method () will
6749 * handle setting the return value
6750 * (test case: test_0_inline_throw ()).
6752 if (return_var && cfg->cbb->in_count) {
6756 //g_assert (returnvar != -1);
6757 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6758 cfg->ret_var_set = TRUE;
6762 MonoType *ret_type = mono_method_signature (method)->ret;
6766 * Place a seq point here too even though the IL stack is not
6767 * empty, so a step over on
6770 * will work correctly.
6772 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6773 MONO_ADD_INS (cfg->cbb, ins);
6776 g_assert (!return_var);
6779 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6782 if (!cfg->vret_addr) {
6785 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6787 EMIT_NEW_RETLOADA (cfg, ret_addr);
6789 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6790 ins->klass = mono_class_from_mono_type (ret_type);
6793 #ifdef MONO_ARCH_SOFT_FLOAT
6794 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6795 MonoInst *iargs [1];
6799 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6800 mono_arch_emit_setret (cfg, method, conv);
6802 mono_arch_emit_setret (cfg, method, *sp);
6805 mono_arch_emit_setret (cfg, method, *sp);
6810 if (sp != stack_start)
6812 MONO_INST_NEW (cfg, ins, OP_BR);
6814 ins->inst_target_bb = end_bblock;
6815 MONO_ADD_INS (bblock, ins);
6816 link_bblock (cfg, bblock, end_bblock);
6817 start_new_bblock = 1;
6821 MONO_INST_NEW (cfg, ins, OP_BR);
6823 target = ip + 1 + (signed char)(*ip);
6825 GET_BBLOCK (cfg, tblock, target);
6826 link_bblock (cfg, bblock, tblock);
6827 ins->inst_target_bb = tblock;
6828 if (sp != stack_start) {
6829 handle_stack_args (cfg, stack_start, sp - stack_start);
6831 CHECK_UNVERIFIABLE (cfg);
6833 MONO_ADD_INS (bblock, ins);
6834 start_new_bblock = 1;
6835 inline_costs += BRANCH_COST;
6849 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6851 target = ip + 1 + *(signed char*)ip;
6857 inline_costs += BRANCH_COST;
6861 MONO_INST_NEW (cfg, ins, OP_BR);
6864 target = ip + 4 + (gint32)read32(ip);
6866 GET_BBLOCK (cfg, tblock, target);
6867 link_bblock (cfg, bblock, tblock);
6868 ins->inst_target_bb = tblock;
6869 if (sp != stack_start) {
6870 handle_stack_args (cfg, stack_start, sp - stack_start);
6872 CHECK_UNVERIFIABLE (cfg);
6875 MONO_ADD_INS (bblock, ins);
6877 start_new_bblock = 1;
6878 inline_costs += BRANCH_COST;
6885 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6886 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6887 guint32 opsize = is_short ? 1 : 4;
6889 CHECK_OPSIZE (opsize);
6891 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6894 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6899 GET_BBLOCK (cfg, tblock, target);
6900 link_bblock (cfg, bblock, tblock);
6901 GET_BBLOCK (cfg, tblock, ip);
6902 link_bblock (cfg, bblock, tblock);
6904 if (sp != stack_start) {
6905 handle_stack_args (cfg, stack_start, sp - stack_start);
6906 CHECK_UNVERIFIABLE (cfg);
6909 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6910 cmp->sreg1 = sp [0]->dreg;
6911 type_from_op (cmp, sp [0], NULL);
6914 #if SIZEOF_REGISTER == 4
6915 if (cmp->opcode == OP_LCOMPARE_IMM) {
6916 /* Convert it to OP_LCOMPARE */
6917 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6918 ins->type = STACK_I8;
6919 ins->dreg = alloc_dreg (cfg, STACK_I8);
6921 MONO_ADD_INS (bblock, ins);
6922 cmp->opcode = OP_LCOMPARE;
6923 cmp->sreg2 = ins->dreg;
6926 MONO_ADD_INS (bblock, cmp);
6928 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6929 type_from_op (ins, sp [0], NULL);
6930 MONO_ADD_INS (bblock, ins);
6931 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6932 GET_BBLOCK (cfg, tblock, target);
6933 ins->inst_true_bb = tblock;
6934 GET_BBLOCK (cfg, tblock, ip);
6935 ins->inst_false_bb = tblock;
6936 start_new_bblock = 2;
6939 inline_costs += BRANCH_COST;
6954 MONO_INST_NEW (cfg, ins, *ip);
6956 target = ip + 4 + (gint32)read32(ip);
6962 inline_costs += BRANCH_COST;
6966 MonoBasicBlock **targets;
6967 MonoBasicBlock *default_bblock;
6968 MonoJumpInfoBBTable *table;
6969 int offset_reg = alloc_preg (cfg);
6970 int target_reg = alloc_preg (cfg);
6971 int table_reg = alloc_preg (cfg);
6972 int sum_reg = alloc_preg (cfg);
6973 gboolean use_op_switch;
6977 n = read32 (ip + 1);
6980 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6984 CHECK_OPSIZE (n * sizeof (guint32));
6985 target = ip + n * sizeof (guint32);
6987 GET_BBLOCK (cfg, default_bblock, target);
6989 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6990 for (i = 0; i < n; ++i) {
6991 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6992 targets [i] = tblock;
6996 if (sp != stack_start) {
6998 * Link the current bb with the targets as well, so handle_stack_args
6999 * will set their in_stack correctly.
7001 link_bblock (cfg, bblock, default_bblock);
7002 for (i = 0; i < n; ++i)
7003 link_bblock (cfg, bblock, targets [i]);
7005 handle_stack_args (cfg, stack_start, sp - stack_start);
7007 CHECK_UNVERIFIABLE (cfg);
7010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7011 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7014 for (i = 0; i < n; ++i)
7015 link_bblock (cfg, bblock, targets [i]);
7017 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7018 table->table = targets;
7019 table->table_size = n;
7021 use_op_switch = FALSE;
7023 /* ARM implements SWITCH statements differently */
7024 /* FIXME: Make it use the generic implementation */
7025 if (!cfg->compile_aot)
7026 use_op_switch = TRUE;
7029 if (COMPILE_LLVM (cfg))
7030 use_op_switch = TRUE;
7032 cfg->cbb->has_jump_table = 1;
7034 if (use_op_switch) {
7035 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7036 ins->sreg1 = src1->dreg;
7037 ins->inst_p0 = table;
7038 ins->inst_many_bb = targets;
7039 ins->klass = GUINT_TO_POINTER (n);
7040 MONO_ADD_INS (cfg->cbb, ins);
7042 if (sizeof (gpointer) == 8)
7043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7047 #if SIZEOF_REGISTER == 8
7048 /* The upper word might not be zero, and we add it to a 64 bit address later */
7049 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7052 if (cfg->compile_aot) {
7053 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7055 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7056 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7057 ins->inst_p0 = table;
7058 ins->dreg = table_reg;
7059 MONO_ADD_INS (cfg->cbb, ins);
7062 /* FIXME: Use load_memindex */
7063 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7065 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7067 start_new_bblock = 1;
7068 inline_costs += (BRANCH_COST * 2);
7088 dreg = alloc_freg (cfg);
7091 dreg = alloc_lreg (cfg);
7094 dreg = alloc_preg (cfg);
7097 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7098 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7099 ins->flags |= ins_flag;
7101 MONO_ADD_INS (bblock, ins);
7116 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7117 ins->flags |= ins_flag;
7119 MONO_ADD_INS (bblock, ins);
7121 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7122 MonoInst *dummy_use;
7123 /* insert call to write barrier */
7124 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7125 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7126 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7136 MONO_INST_NEW (cfg, ins, (*ip));
7138 ins->sreg1 = sp [0]->dreg;
7139 ins->sreg2 = sp [1]->dreg;
7140 type_from_op (ins, sp [0], sp [1]);
7142 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7144 /* Use the immediate opcodes if possible */
7145 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7146 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7147 if (imm_opcode != -1) {
7148 ins->opcode = imm_opcode;
7149 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7152 sp [1]->opcode = OP_NOP;
7156 MONO_ADD_INS ((cfg)->cbb, (ins));
7158 *sp++ = mono_decompose_opcode (cfg, ins);
7175 MONO_INST_NEW (cfg, ins, (*ip));
7177 ins->sreg1 = sp [0]->dreg;
7178 ins->sreg2 = sp [1]->dreg;
7179 type_from_op (ins, sp [0], sp [1]);
7181 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7182 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7184 /* FIXME: Pass opcode to is_inst_imm */
7186 /* Use the immediate opcodes if possible */
7187 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7190 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7191 if (imm_opcode != -1) {
7192 ins->opcode = imm_opcode;
7193 if (sp [1]->opcode == OP_I8CONST) {
7194 #if SIZEOF_REGISTER == 8
7195 ins->inst_imm = sp [1]->inst_l;
7197 ins->inst_ls_word = sp [1]->inst_ls_word;
7198 ins->inst_ms_word = sp [1]->inst_ms_word;
7202 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7205 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7206 if (sp [1]->next == NULL)
7207 sp [1]->opcode = OP_NOP;
7210 MONO_ADD_INS ((cfg)->cbb, (ins));
7212 *sp++ = mono_decompose_opcode (cfg, ins);
7225 case CEE_CONV_OVF_I8:
7226 case CEE_CONV_OVF_U8:
7230 /* Special case this earlier so we have long constants in the IR */
7231 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7232 int data = sp [-1]->inst_c0;
7233 sp [-1]->opcode = OP_I8CONST;
7234 sp [-1]->type = STACK_I8;
7235 #if SIZEOF_REGISTER == 8
7236 if ((*ip) == CEE_CONV_U8)
7237 sp [-1]->inst_c0 = (guint32)data;
7239 sp [-1]->inst_c0 = data;
7241 sp [-1]->inst_ls_word = data;
7242 if ((*ip) == CEE_CONV_U8)
7243 sp [-1]->inst_ms_word = 0;
7245 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7247 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7254 case CEE_CONV_OVF_I4:
7255 case CEE_CONV_OVF_I1:
7256 case CEE_CONV_OVF_I2:
7257 case CEE_CONV_OVF_I:
7258 case CEE_CONV_OVF_U:
7261 if (sp [-1]->type == STACK_R8) {
7262 ADD_UNOP (CEE_CONV_OVF_I8);
7269 case CEE_CONV_OVF_U1:
7270 case CEE_CONV_OVF_U2:
7271 case CEE_CONV_OVF_U4:
7274 if (sp [-1]->type == STACK_R8) {
7275 ADD_UNOP (CEE_CONV_OVF_U8);
7282 case CEE_CONV_OVF_I1_UN:
7283 case CEE_CONV_OVF_I2_UN:
7284 case CEE_CONV_OVF_I4_UN:
7285 case CEE_CONV_OVF_I8_UN:
7286 case CEE_CONV_OVF_U1_UN:
7287 case CEE_CONV_OVF_U2_UN:
7288 case CEE_CONV_OVF_U4_UN:
7289 case CEE_CONV_OVF_U8_UN:
7290 case CEE_CONV_OVF_I_UN:
7291 case CEE_CONV_OVF_U_UN:
7298 CHECK_CFG_EXCEPTION;
7302 case CEE_ADD_OVF_UN:
7304 case CEE_MUL_OVF_UN:
7306 case CEE_SUB_OVF_UN:
7314 token = read32 (ip + 1);
7315 klass = mini_get_class (method, token, generic_context);
7316 CHECK_TYPELOAD (klass);
7318 if (generic_class_is_reference_type (cfg, klass)) {
7319 MonoInst *store, *load;
7320 int dreg = alloc_preg (cfg);
7322 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7323 load->flags |= ins_flag;
7324 MONO_ADD_INS (cfg->cbb, load);
7326 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7327 store->flags |= ins_flag;
7328 MONO_ADD_INS (cfg->cbb, store);
7330 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7331 MonoInst *dummy_use;
7332 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7333 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7334 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7337 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7349 token = read32 (ip + 1);
7350 klass = mini_get_class (method, token, generic_context);
7351 CHECK_TYPELOAD (klass);
7353 /* Optimize the common ldobj+stloc combination */
7363 loc_index = ip [5] - CEE_STLOC_0;
7370 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7371 CHECK_LOCAL (loc_index);
7373 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7374 ins->dreg = cfg->locals [loc_index]->dreg;
7380 /* Optimize the ldobj+stobj combination */
7381 /* The reference case ends up being a load+store anyway */
7382 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7387 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7403 CHECK_STACK_OVF (1);
7405 n = read32 (ip + 1);
7407 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7408 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7409 ins->type = STACK_OBJ;
7412 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7413 MonoInst *iargs [1];
7415 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7416 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7418 if (cfg->opt & MONO_OPT_SHARED) {
7419 MonoInst *iargs [3];
7421 if (cfg->compile_aot) {
7422 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7424 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7425 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7426 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7427 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7428 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7430 if (bblock->out_of_line) {
7431 MonoInst *iargs [2];
7433 if (image == mono_defaults.corlib) {
7435 * Avoid relocations in AOT and save some space by using a
7436 * version of helper_ldstr specialized to mscorlib.
7438 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7439 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7441 /* Avoid creating the string object */
7442 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7443 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7444 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7448 if (cfg->compile_aot) {
7449 NEW_LDSTRCONST (cfg, ins, image, n);
7451 MONO_ADD_INS (bblock, ins);
7454 NEW_PCONST (cfg, ins, NULL);
7455 ins->type = STACK_OBJ;
7456 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7458 MONO_ADD_INS (bblock, ins);
7467 MonoInst *iargs [2];
7468 MonoMethodSignature *fsig;
7471 MonoInst *vtable_arg = NULL;
7474 token = read32 (ip + 1);
7475 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7478 fsig = mono_method_get_signature (cmethod, image, token);
7482 mono_save_token_info (cfg, image, token, cmethod);
7484 if (!mono_class_init (cmethod->klass))
7487 if (cfg->generic_sharing_context)
7488 context_used = mono_method_check_context_used (cmethod);
7490 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7491 if (check_linkdemand (cfg, method, cmethod))
7493 CHECK_CFG_EXCEPTION;
7494 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7495 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7498 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7499 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7500 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7501 mono_class_vtable (cfg->domain, cmethod->klass);
7502 CHECK_TYPELOAD (cmethod->klass);
7504 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7505 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7508 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7509 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7511 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7513 CHECK_TYPELOAD (cmethod->klass);
7514 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7519 n = fsig->param_count;
7523 * Generate smaller code for the common newobj <exception> instruction in
7524 * argument checking code.
7526 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7527 is_exception_class (cmethod->klass) && n <= 2 &&
7528 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7529 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7530 MonoInst *iargs [3];
7532 g_assert (!vtable_arg);
7536 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7539 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7543 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7548 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7551 g_assert_not_reached ();
7559 /* move the args to allow room for 'this' in the first position */
7565 /* check_call_signature () requires sp[0] to be set */
7566 this_ins.type = STACK_OBJ;
7568 if (check_call_signature (cfg, fsig, sp))
7573 if (mini_class_is_system_array (cmethod->klass)) {
7574 g_assert (!vtable_arg);
7576 *sp = emit_get_rgctx_method (cfg, context_used,
7577 cmethod, MONO_RGCTX_INFO_METHOD);
7579 /* Avoid varargs in the common case */
7580 if (fsig->param_count == 1)
7581 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7582 else if (fsig->param_count == 2)
7583 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7584 else if (fsig->param_count == 3)
7585 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7587 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7588 } else if (cmethod->string_ctor) {
7589 g_assert (!context_used);
7590 g_assert (!vtable_arg);
7591 /* we simply pass a null pointer */
7592 EMIT_NEW_PCONST (cfg, *sp, NULL);
7593 /* now call the string ctor */
7594 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7596 MonoInst* callvirt_this_arg = NULL;
7598 if (cmethod->klass->valuetype) {
7599 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7600 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7601 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7606 * The code generated by mini_emit_virtual_call () expects
7607 * iargs [0] to be a boxed instance, but luckily the vcall
7608 * will be transformed into a normal call there.
7610 } else if (context_used) {
7611 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7614 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7616 CHECK_TYPELOAD (cmethod->klass);
7619 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7620 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7621 * As a workaround, we call class cctors before allocating objects.
7623 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7624 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7625 if (cfg->verbose_level > 2)
7626 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7627 class_inits = g_slist_prepend (class_inits, vtable);
7630 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7633 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7636 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7638 /* Now call the actual ctor */
7639 /* Avoid virtual calls to ctors if possible */
7640 if (cmethod->klass->marshalbyref)
7641 callvirt_this_arg = sp [0];
7644 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7645 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7646 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7651 CHECK_CFG_EXCEPTION;
7656 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7657 mono_method_check_inlining (cfg, cmethod) &&
7658 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7659 !g_list_find (dont_inline, cmethod)) {
7662 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7663 cfg->real_offset += 5;
7666 inline_costs += costs - 5;
7669 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7671 } else if (context_used &&
7672 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7673 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7674 MonoInst *cmethod_addr;
7676 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7677 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7679 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7682 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7683 callvirt_this_arg, NULL, vtable_arg);
7687 if (alloc == NULL) {
7689 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7690 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7704 token = read32 (ip + 1);
7705 klass = mini_get_class (method, token, generic_context);
7706 CHECK_TYPELOAD (klass);
7707 if (sp [0]->type != STACK_OBJ)
7710 if (cfg->generic_sharing_context)
7711 context_used = mono_class_check_context_used (klass);
7713 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7720 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7722 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7726 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7727 MonoMethod *mono_castclass;
7728 MonoInst *iargs [1];
7731 mono_castclass = mono_marshal_get_castclass (klass);
7734 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7735 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7736 g_assert (costs > 0);
7739 cfg->real_offset += 5;
7744 inline_costs += costs;
7747 ins = handle_castclass (cfg, klass, *sp, context_used);
7748 CHECK_CFG_EXCEPTION;
7758 token = read32 (ip + 1);
7759 klass = mini_get_class (method, token, generic_context);
7760 CHECK_TYPELOAD (klass);
7761 if (sp [0]->type != STACK_OBJ)
7764 if (cfg->generic_sharing_context)
7765 context_used = mono_class_check_context_used (klass);
7767 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7774 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7776 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7780 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7781 MonoMethod *mono_isinst;
7782 MonoInst *iargs [1];
7785 mono_isinst = mono_marshal_get_isinst (klass);
7788 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7789 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7790 g_assert (costs > 0);
7793 cfg->real_offset += 5;
7798 inline_costs += costs;
7801 ins = handle_isinst (cfg, klass, *sp, context_used);
7802 CHECK_CFG_EXCEPTION;
7809 case CEE_UNBOX_ANY: {
7813 token = read32 (ip + 1);
7814 klass = mini_get_class (method, token, generic_context);
7815 CHECK_TYPELOAD (klass);
7817 mono_save_token_info (cfg, image, token, klass);
7819 if (cfg->generic_sharing_context)
7820 context_used = mono_class_check_context_used (klass);
7822 if (generic_class_is_reference_type (cfg, klass)) {
7823 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7825 MonoInst *iargs [2];
7830 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7831 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7835 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7836 MonoMethod *mono_castclass;
7837 MonoInst *iargs [1];
7840 mono_castclass = mono_marshal_get_castclass (klass);
7843 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7844 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7846 g_assert (costs > 0);
7849 cfg->real_offset += 5;
7853 inline_costs += costs;
7855 ins = handle_castclass (cfg, klass, *sp, 0);
7856 CHECK_CFG_EXCEPTION;
7864 if (mono_class_is_nullable (klass)) {
7865 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7872 ins = handle_unbox (cfg, klass, sp, context_used);
7878 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7891 token = read32 (ip + 1);
7892 klass = mini_get_class (method, token, generic_context);
7893 CHECK_TYPELOAD (klass);
7895 mono_save_token_info (cfg, image, token, klass);
7897 if (cfg->generic_sharing_context)
7898 context_used = mono_class_check_context_used (klass);
7900 if (generic_class_is_reference_type (cfg, klass)) {
7906 if (klass == mono_defaults.void_class)
7908 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7910 /* frequent check in generic code: box (struct), brtrue */
7911 if (!mono_class_is_nullable (klass) &&
7912 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7913 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7915 MONO_INST_NEW (cfg, ins, OP_BR);
7916 if (*ip == CEE_BRTRUE_S) {
7919 target = ip + 1 + (signed char)(*ip);
7924 target = ip + 4 + (gint)(read32 (ip));
7927 GET_BBLOCK (cfg, tblock, target);
7928 link_bblock (cfg, bblock, tblock);
7929 ins->inst_target_bb = tblock;
7930 GET_BBLOCK (cfg, tblock, ip);
7932 * This leads to some inconsistency, since the two bblocks are
7933 * not really connected, but it is needed for handling stack
7934 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7935 * FIXME: This should only be needed if sp != stack_start, but that
7936 * doesn't work for some reason (test failure in mcs/tests on x86).
7938 link_bblock (cfg, bblock, tblock);
7939 if (sp != stack_start) {
7940 handle_stack_args (cfg, stack_start, sp - stack_start);
7942 CHECK_UNVERIFIABLE (cfg);
7944 MONO_ADD_INS (bblock, ins);
7945 start_new_bblock = 1;
7949 *sp++ = handle_box (cfg, val, klass, context_used);
7951 CHECK_CFG_EXCEPTION;
7960 token = read32 (ip + 1);
7961 klass = mini_get_class (method, token, generic_context);
7962 CHECK_TYPELOAD (klass);
7964 mono_save_token_info (cfg, image, token, klass);
7966 if (cfg->generic_sharing_context)
7967 context_used = mono_class_check_context_used (klass);
7969 if (mono_class_is_nullable (klass)) {
7972 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7973 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7977 ins = handle_unbox (cfg, klass, sp, context_used);
7987 MonoClassField *field;
7991 if (*ip == CEE_STFLD) {
7998 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8000 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8003 token = read32 (ip + 1);
8004 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8005 field = mono_method_get_wrapper_data (method, token);
8006 klass = field->parent;
8009 field = mono_field_from_token (image, token, &klass, generic_context);
8013 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8014 FIELD_ACCESS_FAILURE;
8015 mono_class_init (klass);
8017 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8018 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8019 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8020 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8023 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8024 if (*ip == CEE_STFLD) {
8025 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8027 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8028 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8029 MonoInst *iargs [5];
8032 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8033 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8034 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8038 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8039 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8040 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8041 g_assert (costs > 0);
8043 cfg->real_offset += 5;
8046 inline_costs += costs;
8048 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8053 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8055 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8056 if (sp [0]->opcode != OP_LDADDR)
8057 store->flags |= MONO_INST_FAULT;
8059 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8060 /* insert call to write barrier */
8061 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8062 MonoInst *iargs [2], *dummy_use;
8065 dreg = alloc_preg (cfg);
8066 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8068 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8070 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8073 store->flags |= ins_flag;
8080 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8081 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8082 MonoInst *iargs [4];
8085 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8086 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8087 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8088 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8089 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8090 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8092 g_assert (costs > 0);
8094 cfg->real_offset += 5;
8098 inline_costs += costs;
8100 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8104 if (sp [0]->type == STACK_VTYPE) {
8107 /* Have to compute the address of the variable */
8109 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8111 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8113 g_assert (var->klass == klass);
8115 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8119 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8121 if (*ip == CEE_LDFLDA) {
8122 dreg = alloc_preg (cfg);
8124 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8125 ins->klass = mono_class_from_mono_type (field->type);
8126 ins->type = STACK_MP;
8131 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8132 load->flags |= ins_flag;
8133 if (sp [0]->opcode != OP_LDADDR)
8134 load->flags |= MONO_INST_FAULT;
8145 MonoClassField *field;
8146 gpointer addr = NULL;
8147 gboolean is_special_static;
8150 token = read32 (ip + 1);
8152 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8153 field = mono_method_get_wrapper_data (method, token);
8154 klass = field->parent;
8157 field = mono_field_from_token (image, token, &klass, generic_context);
8160 mono_class_init (klass);
8161 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8162 FIELD_ACCESS_FAILURE;
8164 /* if the class is Critical then transparent code cannot access it's fields */
8165 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8166 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8169 * We can only support shared generic static
8170 * field access on architectures where the
8171 * trampoline code has been extended to handle
8172 * the generic class init.
8174 #ifndef MONO_ARCH_VTABLE_REG
8175 GENERIC_SHARING_FAILURE (*ip);
8178 if (cfg->generic_sharing_context)
8179 context_used = mono_class_check_context_used (klass);
8181 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8183 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8184 * to be called here.
8186 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8187 mono_class_vtable (cfg->domain, klass);
8188 CHECK_TYPELOAD (klass);
8190 mono_domain_lock (cfg->domain);
8191 if (cfg->domain->special_static_fields)
8192 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8193 mono_domain_unlock (cfg->domain);
8195 is_special_static = mono_class_field_is_special_static (field);
8197 /* Generate IR to compute the field address */
8198 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8200 * Fast access to TLS data
8201 * Inline version of get_thread_static_data () in
8205 int idx, static_data_reg, array_reg, dreg;
8206 MonoInst *thread_ins;
8208 // offset &= 0x7fffffff;
8209 // idx = (offset >> 24) - 1;
8210 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8212 thread_ins = mono_get_thread_intrinsic (cfg);
8213 MONO_ADD_INS (cfg->cbb, thread_ins);
8214 static_data_reg = alloc_ireg (cfg);
8215 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8217 if (cfg->compile_aot) {
8218 int offset_reg, offset2_reg, idx_reg;
8220 /* For TLS variables, this will return the TLS offset */
8221 EMIT_NEW_SFLDACONST (cfg, ins, field);
8222 offset_reg = ins->dreg;
8223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8224 idx_reg = alloc_ireg (cfg);
8225 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8226 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8228 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8229 array_reg = alloc_ireg (cfg);
8230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8231 offset2_reg = alloc_ireg (cfg);
8232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8233 dreg = alloc_ireg (cfg);
8234 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8236 offset = (gsize)addr & 0x7fffffff;
8237 idx = (offset >> 24) - 1;
8239 array_reg = alloc_ireg (cfg);
8240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8241 dreg = alloc_ireg (cfg);
8242 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8244 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8245 (cfg->compile_aot && is_special_static) ||
8246 (context_used && is_special_static)) {
8247 MonoInst *iargs [2];
8249 g_assert (field->parent);
8250 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8252 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8253 field, MONO_RGCTX_INFO_CLASS_FIELD);
8255 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8257 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8258 } else if (context_used) {
8259 MonoInst *static_data;
8262 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8263 method->klass->name_space, method->klass->name, method->name,
8264 depth, field->offset);
8267 if (mono_class_needs_cctor_run (klass, method))
8268 emit_generic_class_init (cfg, klass);
8271 * The pointer we're computing here is
8273 * super_info.static_data + field->offset
8275 static_data = emit_get_rgctx_klass (cfg, context_used,
8276 klass, MONO_RGCTX_INFO_STATIC_DATA);
8278 if (field->offset == 0) {
8281 int addr_reg = mono_alloc_preg (cfg);
8282 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8284 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8285 MonoInst *iargs [2];
8287 g_assert (field->parent);
8288 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8289 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8290 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8292 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8294 CHECK_TYPELOAD (klass);
8296 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8297 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8298 if (cfg->verbose_level > 2)
8299 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8300 class_inits = g_slist_prepend (class_inits, vtable);
8302 if (cfg->run_cctors) {
8304 /* This makes it so that inlining cannot trigger */
8305 /* .cctors: too many apps depend on them */
8306 /* running with a specific order... */
8307 if (! vtable->initialized)
8309 ex = mono_runtime_class_init_full (vtable, FALSE);
8311 set_exception_object (cfg, ex);
8312 goto exception_exit;
8316 addr = (char*)vtable->data + field->offset;
8318 if (cfg->compile_aot)
8319 EMIT_NEW_SFLDACONST (cfg, ins, field);
8321 EMIT_NEW_PCONST (cfg, ins, addr);
8323 MonoInst *iargs [1];
8324 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8325 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8329 /* Generate IR to do the actual load/store operation */
8331 if (*ip == CEE_LDSFLDA) {
8332 ins->klass = mono_class_from_mono_type (field->type);
8333 ins->type = STACK_PTR;
8335 } else if (*ip == CEE_STSFLD) {
8340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8341 store->flags |= ins_flag;
8343 gboolean is_const = FALSE;
8344 MonoVTable *vtable = NULL;
8346 if (!context_used) {
8347 vtable = mono_class_vtable (cfg->domain, klass);
8348 CHECK_TYPELOAD (klass);
8350 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8351 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8352 gpointer addr = (char*)vtable->data + field->offset;
8353 int ro_type = field->type->type;
8354 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8355 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8357 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8360 case MONO_TYPE_BOOLEAN:
8362 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8366 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8369 case MONO_TYPE_CHAR:
8371 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8375 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8380 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8384 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8390 case MONO_TYPE_FNPTR:
8391 #ifndef HAVE_MOVING_COLLECTOR
8392 case MONO_TYPE_STRING:
8393 case MONO_TYPE_OBJECT:
8394 case MONO_TYPE_CLASS:
8395 case MONO_TYPE_SZARRAY:
8396 case MONO_TYPE_ARRAY:
8398 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8399 type_to_eval_stack_type ((cfg), field->type, *sp);
8404 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8409 case MONO_TYPE_VALUETYPE:
8419 CHECK_STACK_OVF (1);
8421 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8422 load->flags |= ins_flag;
8435 token = read32 (ip + 1);
8436 klass = mini_get_class (method, token, generic_context);
8437 CHECK_TYPELOAD (klass);
8438 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8439 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8440 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8441 generic_class_is_reference_type (cfg, klass)) {
8442 MonoInst *dummy_use;
8443 /* insert call to write barrier */
8444 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8445 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8446 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8458 const char *data_ptr;
8460 guint32 field_token;
8466 token = read32 (ip + 1);
8468 klass = mini_get_class (method, token, generic_context);
8469 CHECK_TYPELOAD (klass);
8471 if (cfg->generic_sharing_context)
8472 context_used = mono_class_check_context_used (klass);
8474 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8475 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8476 ins->sreg1 = sp [0]->dreg;
8477 ins->type = STACK_I4;
8478 ins->dreg = alloc_ireg (cfg);
8479 MONO_ADD_INS (cfg->cbb, ins);
8480 *sp = mono_decompose_opcode (cfg, ins);
8485 MonoClass *array_class = mono_array_class_get (klass, 1);
8486 /* FIXME: we cannot get a managed
8487 allocator because we can't get the
8488 open generic class's vtable. We
8489 have the same problem in
8490 handle_alloc(). This
8491 needs to be solved so that we can
8492 have managed allocs of shared
8495 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8496 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8498 MonoMethod *managed_alloc = NULL;
8500 /* FIXME: Decompose later to help abcrem */
8503 args [0] = emit_get_rgctx_klass (cfg, context_used,
8504 array_class, MONO_RGCTX_INFO_VTABLE);
8509 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8511 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8513 if (cfg->opt & MONO_OPT_SHARED) {
8514 /* Decompose now to avoid problems with references to the domainvar */
8515 MonoInst *iargs [3];
8517 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8518 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8521 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8523 /* Decompose later since it is needed by abcrem */
8524 MonoClass *array_type = mono_array_class_get (klass, 1);
8525 mono_class_vtable (cfg->domain, array_type);
8526 CHECK_TYPELOAD (array_type);
8528 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8529 ins->dreg = alloc_preg (cfg);
8530 ins->sreg1 = sp [0]->dreg;
8531 ins->inst_newa_class = klass;
8532 ins->type = STACK_OBJ;
8534 MONO_ADD_INS (cfg->cbb, ins);
8535 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8536 cfg->cbb->has_array_access = TRUE;
8538 /* Needed so mono_emit_load_get_addr () gets called */
8539 mono_get_got_var (cfg);
8549 * we inline/optimize the initialization sequence if possible.
8550 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8551 * for small sizes open code the memcpy
8552 * ensure the rva field is big enough
8554 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8555 MonoMethod *memcpy_method = get_memcpy_method ();
8556 MonoInst *iargs [3];
8557 int add_reg = alloc_preg (cfg);
8559 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8560 if (cfg->compile_aot) {
8561 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8563 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8565 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8566 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8575 if (sp [0]->type != STACK_OBJ)
8578 dreg = alloc_preg (cfg);
8579 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8580 ins->dreg = alloc_preg (cfg);
8581 ins->sreg1 = sp [0]->dreg;
8582 ins->type = STACK_I4;
8583 MONO_ADD_INS (cfg->cbb, ins);
8584 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8585 cfg->cbb->has_array_access = TRUE;
8593 if (sp [0]->type != STACK_OBJ)
8596 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8598 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8599 CHECK_TYPELOAD (klass);
8600 /* we need to make sure that this array is exactly the type it needs
8601 * to be for correctness. the wrappers are lax with their usage
8602 * so we need to ignore them here
8604 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8605 MonoClass *array_class = mono_array_class_get (klass, 1);
8606 mini_emit_check_array_type (cfg, sp [0], array_class);
8607 CHECK_TYPELOAD (array_class);
8611 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8626 case CEE_LDELEM_REF: {
8632 if (*ip == CEE_LDELEM) {
8634 token = read32 (ip + 1);
8635 klass = mini_get_class (method, token, generic_context);
8636 CHECK_TYPELOAD (klass);
8637 mono_class_init (klass);
8640 klass = array_access_to_klass (*ip);
8642 if (sp [0]->type != STACK_OBJ)
8645 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8647 if (sp [1]->opcode == OP_ICONST) {
8648 int array_reg = sp [0]->dreg;
8649 int index_reg = sp [1]->dreg;
8650 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8652 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8653 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8655 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8656 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8659 if (*ip == CEE_LDELEM)
8672 case CEE_STELEM_REF:
8679 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8681 if (*ip == CEE_STELEM) {
8683 token = read32 (ip + 1);
8684 klass = mini_get_class (method, token, generic_context);
8685 CHECK_TYPELOAD (klass);
8686 mono_class_init (klass);
8689 klass = array_access_to_klass (*ip);
8691 if (sp [0]->type != STACK_OBJ)
8694 /* storing a NULL doesn't need any of the complex checks in stelemref */
8695 if (generic_class_is_reference_type (cfg, klass) &&
8696 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8697 MonoMethod* helper = mono_marshal_get_stelemref ();
8698 MonoInst *iargs [3];
8700 if (sp [0]->type != STACK_OBJ)
8702 if (sp [2]->type != STACK_OBJ)
8709 mono_emit_method_call (cfg, helper, iargs, NULL);
8711 if (sp [1]->opcode == OP_ICONST) {
8712 int array_reg = sp [0]->dreg;
8713 int index_reg = sp [1]->dreg;
8714 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8716 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8717 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8719 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8720 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8724 if (*ip == CEE_STELEM)
8731 case CEE_CKFINITE: {
8735 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8736 ins->sreg1 = sp [0]->dreg;
8737 ins->dreg = alloc_freg (cfg);
8738 ins->type = STACK_R8;
8739 MONO_ADD_INS (bblock, ins);
8741 *sp++ = mono_decompose_opcode (cfg, ins);
8746 case CEE_REFANYVAL: {
8747 MonoInst *src_var, *src;
8749 int klass_reg = alloc_preg (cfg);
8750 int dreg = alloc_preg (cfg);
8753 MONO_INST_NEW (cfg, ins, *ip);
8756 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8757 CHECK_TYPELOAD (klass);
8758 mono_class_init (klass);
8760 if (cfg->generic_sharing_context)
8761 context_used = mono_class_check_context_used (klass);
8764 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8766 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8767 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8771 MonoInst *klass_ins;
8773 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8774 klass, MONO_RGCTX_INFO_KLASS);
8777 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8778 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8780 mini_emit_class_check (cfg, klass_reg, klass);
8782 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8783 ins->type = STACK_MP;
8788 case CEE_MKREFANY: {
8789 MonoInst *loc, *addr;
8792 MONO_INST_NEW (cfg, ins, *ip);
8795 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8796 CHECK_TYPELOAD (klass);
8797 mono_class_init (klass);
8799 if (cfg->generic_sharing_context)
8800 context_used = mono_class_check_context_used (klass);
8802 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8803 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8806 MonoInst *const_ins;
8807 int type_reg = alloc_preg (cfg);
8809 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8810 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8811 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8813 } else if (cfg->compile_aot) {
8814 int const_reg = alloc_preg (cfg);
8815 int type_reg = alloc_preg (cfg);
8817 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8818 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8822 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8827 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8828 ins->type = STACK_VTYPE;
8829 ins->klass = mono_defaults.typed_reference_class;
8836 MonoClass *handle_class;
8838 CHECK_STACK_OVF (1);
8841 n = read32 (ip + 1);
8843 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8844 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8845 handle = mono_method_get_wrapper_data (method, n);
8846 handle_class = mono_method_get_wrapper_data (method, n + 1);
8847 if (handle_class == mono_defaults.typehandle_class)
8848 handle = &((MonoClass*)handle)->byval_arg;
8851 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8855 mono_class_init (handle_class);
8856 if (cfg->generic_sharing_context) {
8857 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8858 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8859 /* This case handles ldtoken
8860 of an open type, like for
8863 } else if (handle_class == mono_defaults.typehandle_class) {
8864 /* If we get a MONO_TYPE_CLASS
8865 then we need to provide the
8867 instantiation of it. */
8868 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8871 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8872 } else if (handle_class == mono_defaults.fieldhandle_class)
8873 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8874 else if (handle_class == mono_defaults.methodhandle_class)
8875 context_used = mono_method_check_context_used (handle);
8877 g_assert_not_reached ();
8880 if ((cfg->opt & MONO_OPT_SHARED) &&
8881 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8882 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8883 MonoInst *addr, *vtvar, *iargs [3];
8884 int method_context_used;
8886 if (cfg->generic_sharing_context)
8887 method_context_used = mono_method_check_context_used (method);
8889 method_context_used = 0;
8891 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8893 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8894 EMIT_NEW_ICONST (cfg, iargs [1], n);
8895 if (method_context_used) {
8896 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8897 method, MONO_RGCTX_INFO_METHOD);
8898 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8900 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8901 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8903 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8905 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8907 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8909 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8910 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8911 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8912 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8913 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8914 MonoClass *tclass = mono_class_from_mono_type (handle);
8916 mono_class_init (tclass);
8918 ins = emit_get_rgctx_klass (cfg, context_used,
8919 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8920 } else if (cfg->compile_aot) {
8921 if (method->wrapper_type) {
8922 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8923 /* Special case for static synchronized wrappers */
8924 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8926 /* FIXME: n is not a normal token */
8927 cfg->disable_aot = TRUE;
8928 EMIT_NEW_PCONST (cfg, ins, NULL);
8931 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8934 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8936 ins->type = STACK_OBJ;
8937 ins->klass = cmethod->klass;
8940 MonoInst *addr, *vtvar;
8942 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8945 if (handle_class == mono_defaults.typehandle_class) {
8946 ins = emit_get_rgctx_klass (cfg, context_used,
8947 mono_class_from_mono_type (handle),
8948 MONO_RGCTX_INFO_TYPE);
8949 } else if (handle_class == mono_defaults.methodhandle_class) {
8950 ins = emit_get_rgctx_method (cfg, context_used,
8951 handle, MONO_RGCTX_INFO_METHOD);
8952 } else if (handle_class == mono_defaults.fieldhandle_class) {
8953 ins = emit_get_rgctx_field (cfg, context_used,
8954 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8956 g_assert_not_reached ();
8958 } else if (cfg->compile_aot) {
8959 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8961 EMIT_NEW_PCONST (cfg, ins, handle);
8963 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8965 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8975 MONO_INST_NEW (cfg, ins, OP_THROW);
8977 ins->sreg1 = sp [0]->dreg;
8979 bblock->out_of_line = TRUE;
8980 MONO_ADD_INS (bblock, ins);
8981 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8982 MONO_ADD_INS (bblock, ins);
8985 link_bblock (cfg, bblock, end_bblock);
8986 start_new_bblock = 1;
8988 case CEE_ENDFINALLY:
8989 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8990 MONO_ADD_INS (bblock, ins);
8992 start_new_bblock = 1;
8995 * Control will leave the method so empty the stack, otherwise
8996 * the next basic block will start with a nonempty stack.
8998 while (sp != stack_start) {
9006 if (*ip == CEE_LEAVE) {
9008 target = ip + 5 + (gint32)read32(ip + 1);
9011 target = ip + 2 + (signed char)(ip [1]);
9014 /* empty the stack */
9015 while (sp != stack_start) {
9020 * If this leave statement is in a catch block, check for a
9021 * pending exception, and rethrow it if necessary.
9022 * We avoid doing this in runtime invoke wrappers, since those are called
9023 * by native code which expects the wrapper to catch all exceptions.
9025 for (i = 0; i < header->num_clauses; ++i) {
9026 MonoExceptionClause *clause = &header->clauses [i];
9029 * Use <= in the final comparison to handle clauses with multiple
9030 * leave statements, like in bug #78024.
9031 * The ordering of the exception clauses guarantees that we find the
9034 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9036 MonoBasicBlock *dont_throw;
9041 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9044 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9046 NEW_BBLOCK (cfg, dont_throw);
9049 * Currently, we always rethrow the abort exception, despite the
9050 * fact that this is not correct. See thread6.cs for an example.
9051 * But propagating the abort exception is more important than
9052 * getting the semantics right.
9054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9056 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9058 MONO_START_BB (cfg, dont_throw);
9063 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9065 MonoExceptionClause *clause;
9067 for (tmp = handlers; tmp; tmp = tmp->next) {
9069 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9071 link_bblock (cfg, bblock, tblock);
9072 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9073 ins->inst_target_bb = tblock;
9074 ins->inst_eh_block = clause;
9075 MONO_ADD_INS (bblock, ins);
9076 bblock->has_call_handler = 1;
9077 if (COMPILE_LLVM (cfg)) {
9078 MonoBasicBlock *target_bb;
9081 * Link the finally bblock with the target, since it will
9082 * conceptually branch there.
9083 * FIXME: Have to link the bblock containing the endfinally.
9085 GET_BBLOCK (cfg, target_bb, target);
9086 link_bblock (cfg, tblock, target_bb);
9089 g_list_free (handlers);
9092 MONO_INST_NEW (cfg, ins, OP_BR);
9093 MONO_ADD_INS (bblock, ins);
9094 GET_BBLOCK (cfg, tblock, target);
9095 link_bblock (cfg, bblock, tblock);
9096 ins->inst_target_bb = tblock;
9097 start_new_bblock = 1;
9099 if (*ip == CEE_LEAVE)
9108 * Mono specific opcodes
9110 case MONO_CUSTOM_PREFIX: {
9112 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9116 case CEE_MONO_ICALL: {
9118 MonoJitICallInfo *info;
9120 token = read32 (ip + 2);
9121 func = mono_method_get_wrapper_data (method, token);
9122 info = mono_find_jit_icall_by_addr (func);
9125 CHECK_STACK (info->sig->param_count);
9126 sp -= info->sig->param_count;
9128 ins = mono_emit_jit_icall (cfg, info->func, sp);
9129 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9133 inline_costs += 10 * num_calls++;
9137 case CEE_MONO_LDPTR: {
9140 CHECK_STACK_OVF (1);
9142 token = read32 (ip + 2);
9144 ptr = mono_method_get_wrapper_data (method, token);
9145 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9146 MonoJitICallInfo *callinfo;
9147 const char *icall_name;
9149 icall_name = method->name + strlen ("__icall_wrapper_");
9150 g_assert (icall_name);
9151 callinfo = mono_find_jit_icall_by_name (icall_name);
9152 g_assert (callinfo);
9154 if (ptr == callinfo->func) {
9155 /* Will be transformed into an AOTCONST later */
9156 EMIT_NEW_PCONST (cfg, ins, ptr);
9162 /* FIXME: Generalize this */
9163 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9164 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9169 EMIT_NEW_PCONST (cfg, ins, ptr);
9172 inline_costs += 10 * num_calls++;
9173 /* Can't embed random pointers into AOT code */
9174 cfg->disable_aot = 1;
9177 case CEE_MONO_ICALL_ADDR: {
9178 MonoMethod *cmethod;
9181 CHECK_STACK_OVF (1);
9183 token = read32 (ip + 2);
9185 cmethod = mono_method_get_wrapper_data (method, token);
9187 if (cfg->compile_aot) {
9188 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9190 ptr = mono_lookup_internal_call (cmethod);
9192 EMIT_NEW_PCONST (cfg, ins, ptr);
9198 case CEE_MONO_VTADDR: {
9199 MonoInst *src_var, *src;
9205 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9206 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9211 case CEE_MONO_NEWOBJ: {
9212 MonoInst *iargs [2];
9214 CHECK_STACK_OVF (1);
9216 token = read32 (ip + 2);
9217 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9218 mono_class_init (klass);
9219 NEW_DOMAINCONST (cfg, iargs [0]);
9220 MONO_ADD_INS (cfg->cbb, iargs [0]);
9221 NEW_CLASSCONST (cfg, iargs [1], klass);
9222 MONO_ADD_INS (cfg->cbb, iargs [1]);
9223 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9225 inline_costs += 10 * num_calls++;
9228 case CEE_MONO_OBJADDR:
9231 MONO_INST_NEW (cfg, ins, OP_MOVE);
9232 ins->dreg = alloc_preg (cfg);
9233 ins->sreg1 = sp [0]->dreg;
9234 ins->type = STACK_MP;
9235 MONO_ADD_INS (cfg->cbb, ins);
9239 case CEE_MONO_LDNATIVEOBJ:
9241 * Similar to LDOBJ, but instead load the unmanaged
9242 * representation of the vtype to the stack.
9247 token = read32 (ip + 2);
9248 klass = mono_method_get_wrapper_data (method, token);
9249 g_assert (klass->valuetype);
9250 mono_class_init (klass);
9253 MonoInst *src, *dest, *temp;
9256 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9257 temp->backend.is_pinvoke = 1;
9258 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9259 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9261 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9262 dest->type = STACK_VTYPE;
9263 dest->klass = klass;
9269 case CEE_MONO_RETOBJ: {
9271 * Same as RET, but return the native representation of a vtype
9274 g_assert (cfg->ret);
9275 g_assert (mono_method_signature (method)->pinvoke);
9280 token = read32 (ip + 2);
9281 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9283 if (!cfg->vret_addr) {
9284 g_assert (cfg->ret_var_is_local);
9286 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9288 EMIT_NEW_RETLOADA (cfg, ins);
9290 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9292 if (sp != stack_start)
9295 MONO_INST_NEW (cfg, ins, OP_BR);
9296 ins->inst_target_bb = end_bblock;
9297 MONO_ADD_INS (bblock, ins);
9298 link_bblock (cfg, bblock, end_bblock);
9299 start_new_bblock = 1;
9303 case CEE_MONO_CISINST:
9304 case CEE_MONO_CCASTCLASS: {
9309 token = read32 (ip + 2);
9310 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9311 if (ip [1] == CEE_MONO_CISINST)
9312 ins = handle_cisinst (cfg, klass, sp [0]);
9314 ins = handle_ccastclass (cfg, klass, sp [0]);
9320 case CEE_MONO_SAVE_LMF:
9321 case CEE_MONO_RESTORE_LMF:
9322 #ifdef MONO_ARCH_HAVE_LMF_OPS
9323 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9324 MONO_ADD_INS (bblock, ins);
9325 cfg->need_lmf_area = TRUE;
9329 case CEE_MONO_CLASSCONST:
9330 CHECK_STACK_OVF (1);
9332 token = read32 (ip + 2);
9333 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9336 inline_costs += 10 * num_calls++;
9338 case CEE_MONO_NOT_TAKEN:
9339 bblock->out_of_line = TRUE;
9343 CHECK_STACK_OVF (1);
9345 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9346 ins->dreg = alloc_preg (cfg);
9347 ins->inst_offset = (gint32)read32 (ip + 2);
9348 ins->type = STACK_PTR;
9349 MONO_ADD_INS (bblock, ins);
9353 case CEE_MONO_DYN_CALL: {
9356 /* It would be easier to call a trampoline, but that would put an
9357 * extra frame on the stack, confusing exception handling. So
9358 * implement it inline using an opcode for now.
9361 if (!cfg->dyn_call_var) {
9362 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9363 /* prevent it from being register allocated */
9364 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9367 /* Has to use a call inst since it local regalloc expects it */
9368 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9369 ins = (MonoInst*)call;
9371 ins->sreg1 = sp [0]->dreg;
9372 ins->sreg2 = sp [1]->dreg;
9373 MONO_ADD_INS (bblock, ins);
9375 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9376 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9380 inline_costs += 10 * num_calls++;
9385 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9395 /* somewhat similar to LDTOKEN */
9396 MonoInst *addr, *vtvar;
9397 CHECK_STACK_OVF (1);
9398 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9400 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9401 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9403 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9404 ins->type = STACK_VTYPE;
9405 ins->klass = mono_defaults.argumenthandle_class;
9418 * The following transforms:
9419 * CEE_CEQ into OP_CEQ
9420 * CEE_CGT into OP_CGT
9421 * CEE_CGT_UN into OP_CGT_UN
9422 * CEE_CLT into OP_CLT
9423 * CEE_CLT_UN into OP_CLT_UN
9425 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9427 MONO_INST_NEW (cfg, ins, cmp->opcode);
9429 cmp->sreg1 = sp [0]->dreg;
9430 cmp->sreg2 = sp [1]->dreg;
9431 type_from_op (cmp, sp [0], sp [1]);
9433 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9434 cmp->opcode = OP_LCOMPARE;
9435 else if (sp [0]->type == STACK_R8)
9436 cmp->opcode = OP_FCOMPARE;
9438 cmp->opcode = OP_ICOMPARE;
9439 MONO_ADD_INS (bblock, cmp);
9440 ins->type = STACK_I4;
9441 ins->dreg = alloc_dreg (cfg, ins->type);
/* NOTE(review): this listing is elided — several original lines are missing between
 * the numbered lines below; code is left byte-identical, comments only added. */
/* Tail of a comparison opcode: fold the preceding FCOMPARE into the compare-result ins. */
9442 type_from_op (ins, sp [0], sp [1]);
9444 if (cmp->opcode == OP_FCOMPARE) {
9446 * The backends expect the fceq opcodes to do the
9449 cmp->opcode = OP_NOP;
9450 ins->sreg1 = cmp->sreg1;
9451 ins->sreg2 = cmp->sreg2;
9453 MONO_ADD_INS (bblock, ins);
/* CEE_LDFTN: push a native function pointer for the method named by the token. */
9460 MonoMethod *cil_method;
9461 gboolean needs_static_rgctx_invoke;
9463 CHECK_STACK_OVF (1);
9465 n = read32 (ip + 2);
9466 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9469 mono_class_init (cmethod->klass);
9471 mono_save_token_info (cfg, image, n, cmethod);
9473 if (cfg->generic_sharing_context)
9474 context_used = mono_method_check_context_used (cmethod);
9476 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9478 cil_method = cmethod;
/* Visibility check: raises a MethodAccessException-style compile failure via macro. */
9479 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9480 METHOD_ACCESS_FAILURE;
9482 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9483 if (check_linkdemand (cfg, method, cmethod))
9485 CHECK_CFG_EXCEPTION;
9486 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9487 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9491 * Optimize the common case of ldftn+delegate creation
/* Peephole: if the next opcode (ip+6) is NEWOBJ of a multicast-delegate ctor in the
 * same bblock, build the delegate directly instead of going through mono_ldftn. */
9493 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9494 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9495 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9497 int invoke_context_used = 0;
9499 invoke = mono_get_delegate_invoke (ctor_method->klass);
9500 if (!invoke || !mono_method_signature (invoke))
9503 if (cfg->generic_sharing_context)
9504 invoke_context_used = mono_method_check_context_used (invoke);
9506 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9507 /* FIXME: SGEN support */
9508 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9509 MonoInst *target_ins;
9512 if (cfg->verbose_level > 3)
9513 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9514 target_ins = sp [-1];
9516 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9517 CHECK_CFG_EXCEPTION;
/* Slow path: load the method pointer through the mono_ldftn JIT icall. */
9526 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9527 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9531 inline_costs += 10 * num_calls++;
/* CEE_LDVIRTFTN: push the address of the virtual-dispatch target for an object + token.
 * (Listing elided: some original lines missing between the numbered lines.) */
9534 case CEE_LDVIRTFTN: {
9539 n = read32 (ip + 2);
9540 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9543 mono_class_init (cmethod->klass);
9545 if (cfg->generic_sharing_context)
9546 context_used = mono_method_check_context_used (cmethod);
/* Same CAS / CoreCLR security checks as CEE_LDFTN above. */
9548 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9549 if (check_linkdemand (cfg, method, cmethod))
9551 CHECK_CFG_EXCEPTION;
9552 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9553 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* args [1] is the method handle, possibly fetched through the rgctx when shared. */
9559 args [1] = emit_get_rgctx_method (cfg, context_used,
9560 cmethod, MONO_RGCTX_INFO_METHOD);
9563 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9565 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9568 inline_costs += 10 * num_calls++;
/* Long-form argument/local opcodes (16-bit index variants: ldarg, ldarga, starg,
 * ldloc, ldloca, stloc) followed by CEE_LOCALLOC. Listing elided in places. */
9572 CHECK_STACK_OVF (1);
9574 n = read16 (ip + 2);
9576 EMIT_NEW_ARGLOAD (cfg, ins, n);
9581 CHECK_STACK_OVF (1);
9583 n = read16 (ip + 2);
9585 NEW_ARGLOADA (cfg, ins, n);
9586 MONO_ADD_INS (cfg->cbb, ins);
9594 n = read16 (ip + 2);
/* starg: verify the value on the stack is assignable to the parameter type. */
9596 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9598 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9602 CHECK_STACK_OVF (1);
9604 n = read16 (ip + 2);
9606 EMIT_NEW_LOCLOAD (cfg, ins, n);
9611 unsigned char *tmp_ip;
9612 CHECK_STACK_OVF (1);
9614 n = read16 (ip + 2);
/* ldloca: try the optimized ldloca+initobj/ldobj pattern first. */
9617 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9623 EMIT_NEW_LOCLOADA (cfg, ins, n);
9632 n = read16 (ip + 2);
9634 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9636 emit_stloc_ir (cfg, sp, header, n);
/* CEE_LOCALLOC: stack allocation; refused inside inlined methods (see comment). */
9643 if (sp != stack_start)
9645 if (cfg->method != method)
9647 * Inlining this into a loop in a parent could lead to
9648 * stack overflows which is different behavior than the
9649 * non-inlined case, thus disable inlining in this case.
9651 goto inline_failure;
9653 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9654 ins->dreg = alloc_preg (cfg);
9655 ins->sreg1 = sp [0]->dreg;
9656 ins->type = STACK_PTR;
9657 MONO_ADD_INS (cfg->cbb, ins);
9659 cfg->flags |= MONO_CFG_HAS_ALLOCA;
/* MONO_INST_INIT makes the backend zero the allocated block (localsinit). */
9661 ins->flags |= MONO_INST_INIT;
/* CEE_ENDFILTER: terminate an exception filter; the I4 on the stack is the
 * filter's verdict. Also validates that endfilter sits at the end of the
 * nearest enclosing filter clause. (Listing elided in places.) */
9666 case CEE_ENDFILTER: {
9667 MonoExceptionClause *clause, *nearest;
9668 int cc, nearest_num;
9672 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9674 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9675 ins->sreg1 = (*sp)->dreg;
9676 MONO_ADD_INS (bblock, ins);
9677 start_new_bblock = 1;
/* Find the innermost filter clause containing this IL offset. */
9682 for (cc = 0; cc < header->num_clauses; ++cc) {
9683 clause = &header->clauses [cc];
9684 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9685 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9686 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9692 if ((ip - header->code) != nearest->handler_offset)
/* Prefix opcodes (unaligned., volatile., tail.) followed by initobj,
 * constrained., cpblk/initblk and the "no." prefix. Listing elided in places. */
9697 case CEE_UNALIGNED_:
9698 ins_flag |= MONO_INST_UNALIGNED;
9699 /* FIXME: record alignment? we can assume 1 for now */
9704 ins_flag |= MONO_INST_VOLATILE;
9708 ins_flag |= MONO_INST_TAILCALL;
9709 cfg->flags |= MONO_CFG_HAS_TAIL;
9710 /* Can't inline tail calls at this time */
9711 inline_costs += 100000;
/* CEE_INITOBJ: reference types get a null store; value types get a vzero. */
9718 token = read32 (ip + 2);
9719 klass = mini_get_class (method, token, generic_context);
9720 CHECK_TYPELOAD (klass);
9721 if (generic_class_is_reference_type (cfg, klass))
9722 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9724 mini_emit_initobj (cfg, *sp, NULL, klass);
/* constrained. prefix: remember the constraint class for the following call. */
9728 case CEE_CONSTRAINED_:
9730 token = read32 (ip + 2);
9731 if (method->wrapper_type != MONO_WRAPPER_NONE)
9732 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9734 constrained_call = mono_class_get_full (image, token, generic_context);
9735 CHECK_TYPELOAD (constrained_call);
/* cpblk/initblk: small constant sizes are inlined as memcpy/memset IR;
 * otherwise fall back to the managed memcpy/memset helper methods. */
9740 MonoInst *iargs [3];
9744 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9745 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9746 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9747 /* emit_memset only works when val == 0 */
9748 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9753 if (ip [1] == CEE_CPBLK) {
9754 MonoMethod *memcpy_method = get_memcpy_method ();
9755 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9757 MonoMethod *memset_method = get_memset_method ();
9758 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9778 int handler_offset = -1;
9780 for (i = 0; i < header->num_clauses; ++i) {
9781 MonoExceptionClause *clause = &header->clauses [i];
9782 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9783 handler_offset = clause->handler_offset;
9788 bblock->flags |= BB_EXCEPTION_UNSAFE;
9790 g_assert (handler_offset != -1);
9792 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9793 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9794 ins->sreg1 = load->dreg;
9795 MONO_ADD_INS (bblock, ins);
9797 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9798 MONO_ADD_INS (bblock, ins);
9801 link_bblock (cfg, bblock, end_bblock);
9802 start_new_bblock = 1;
9810 CHECK_STACK_OVF (1);
9812 token = read32 (ip + 2);
9813 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9814 MonoType *type = mono_type_create_from_typespec (image, token);
9815 token = mono_type_size (type, &ialign);
9817 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9818 CHECK_TYPELOAD (klass);
9819 mono_class_init (klass);
9820 token = mono_class_value_size (klass, &align);
9822 EMIT_NEW_ICONST (cfg, ins, token);
9827 case CEE_REFANYTYPE: {
9828 MonoInst *src_var, *src;
9834 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9836 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9837 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9838 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9856 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
/* Epilogue of method_to_ir: close the last bblock, emit domain/GOT setup,
 * zero-initialize locals, add entry/exit sequence points, compute EH regions,
 * then the shared cleanup and the error-exit labels. Listing elided in places. */
9866 g_warning ("opcode 0x%02x not handled", *ip);
9870 if (start_new_bblock != 1)
9873 bblock->cil_length = ip - bblock->cil_code;
9874 bblock->next_bb = end_bblock;
/* Cache the current MonoDomain in cfg->domainvar when the method needs it. */
9876 if (cfg->method == method && cfg->domainvar) {
9878 MonoInst *get_domain;
9880 cfg->cbb = init_localsbb;
9882 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9883 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9886 get_domain->dreg = alloc_preg (cfg);
9887 MONO_ADD_INS (cfg->cbb, get_domain);
9889 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9890 MONO_ADD_INS (cfg->cbb, store);
9893 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9894 if (cfg->compile_aot)
9895 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9896 mono_get_got_var (cfg);
9899 if (cfg->method == method && cfg->got_var)
9900 mono_emit_load_got_addr (cfg);
/* Zero-initialize IL locals in init_localsbb, picking the constant kind by type. */
9905 cfg->cbb = init_localsbb;
9907 for (i = 0; i < header->num_locals; ++i) {
9908 MonoType *ptype = header->locals [i];
9909 int t = ptype->type;
9910 dreg = cfg->locals [i]->dreg;
/* Enums are initialized as their underlying base type. */
9912 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9913 t = mono_class_enum_basetype (ptype->data.klass)->type;
9915 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9916 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9917 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9918 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9919 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9920 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9921 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9922 ins->type = STACK_R8;
9923 ins->inst_p0 = (void*)&r8_0;
9924 ins->dreg = alloc_dreg (cfg, STACK_R8);
9925 MONO_ADD_INS (init_localsbb, ins);
9926 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9927 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9928 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9929 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9931 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/* Null-initialize reference-typed vregs when requested (e.g. for GC safety). */
9936 if (cfg->init_ref_vars && cfg->method == method) {
9937 /* Emit initialization for ref vars */
9938 // FIXME: Avoid duplication initialization for IL locals.
9939 for (i = 0; i < cfg->num_varinfo; ++i) {
9940 MonoInst *ins = cfg->varinfo [i];
9942 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9943 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9947 /* Add a sequence point for method entry/exit events */
9949 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9950 MONO_ADD_INS (init_localsbb, ins);
9951 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9952 MONO_ADD_INS (cfg->bb_exit, ins);
/* Assign an EH region id to every bblock and create spvars for the regions. */
9957 if (cfg->method == method) {
9959 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9960 bb->region = mono_find_block_region (cfg, bb->real_offset);
9962 mono_create_spvar_for_region (cfg, bb->region);
9963 if (cfg->verbose_level > 2)
9964 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9968 g_slist_free (class_inits);
9969 dont_inline = g_list_remove (dont_inline, method);
/* Negative inline_costs signals the method exceeded complexity limits. */
9971 if (inline_costs < 0) {
9974 /* Method is too large */
9975 mname = mono_method_full_name (method, TRUE);
9976 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9977 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9979 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9980 mono_basic_block_free (original_bb);
9984 if ((cfg->verbose_level > 2) && (cfg->method == method))
9985 mono_print_code (cfg, "AFTER METHOD-TO-IR");
/* Success path: record the header for later freeing and return the cost. */
9987 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9988 mono_basic_block_free (original_bb);
9989 return inline_costs;
/* Error-exit labels: each sets cfg->exception_* then falls into common cleanup. */
9992 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9999 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10003 set_exception_type_from_invalid_il (cfg, method, ip);
10007 g_slist_free (class_inits);
10008 mono_basic_block_free (original_bb);
10009 dont_inline = g_list_remove (dont_inline, method);
10010 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM immediate form.
 * Asserts on opcodes with no immediate variant. (Listing elided: the
 * surrounding switch scaffolding is not shown.) */
10015 store_membase_reg_to_store_membase_imm (int opcode)
10018 case OP_STORE_MEMBASE_REG:
10019 return OP_STORE_MEMBASE_IMM;
10020 case OP_STOREI1_MEMBASE_REG:
10021 return OP_STOREI1_MEMBASE_IMM;
10022 case OP_STOREI2_MEMBASE_REG:
10023 return OP_STOREI2_MEMBASE_IMM;
10024 case OP_STOREI4_MEMBASE_REG:
10025 return OP_STOREI4_MEMBASE_IMM;
10026 case OP_STOREI8_MEMBASE_REG:
10027 return OP_STOREI8_MEMBASE_IMM;
10029 g_assert_not_reached ();
10035 #endif /* DISABLE_JIT */
/* Map a reg-reg ALU/compare/store opcode to its immediate-operand form,
 * used when one source of the instruction is a constant.
 * (Listing elided: the case labels preceding most returns are not shown.) */
10038 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops. */
10042 return OP_IADD_IMM;
10044 return OP_ISUB_IMM;
10046 return OP_IDIV_IMM;
10048 return OP_IDIV_UN_IMM;
10050 return OP_IREM_IMM;
10052 return OP_IREM_UN_IMM;
10054 return OP_IMUL_IMM;
10056 return OP_IAND_IMM;
10060 return OP_IXOR_IMM;
10062 return OP_ISHL_IMM;
10064 return OP_ISHR_IMM;
10066 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops. */
10069 return OP_LADD_IMM;
10071 return OP_LSUB_IMM;
10073 return OP_LAND_IMM;
10077 return OP_LXOR_IMM;
10079 return OP_LSHL_IMM;
10081 return OP_LSHR_IMM;
10083 return OP_LSHR_UN_IMM;
/* Compares. */
10086 return OP_COMPARE_IMM;
10088 return OP_ICOMPARE_IMM;
10090 return OP_LCOMPARE_IMM;
/* Stores with an immediate value. */
10092 case OP_STORE_MEMBASE_REG:
10093 return OP_STORE_MEMBASE_IMM;
10094 case OP_STOREI1_MEMBASE_REG:
10095 return OP_STOREI1_MEMBASE_IMM;
10096 case OP_STOREI2_MEMBASE_REG:
10097 return OP_STOREI2_MEMBASE_IMM;
10098 case OP_STOREI4_MEMBASE_REG:
10099 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms. */
10101 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10103 return OP_X86_PUSH_IMM;
10104 case OP_X86_COMPARE_MEMBASE_REG:
10105 return OP_X86_COMPARE_MEMBASE_IMM;
10107 #if defined(TARGET_AMD64)
10108 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10109 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10111 case OP_VOIDCALL_REG:
10112 return OP_VOIDCALL;
10120 return OP_LOCALLOC_IMM;
/* Map a CIL ldind.* opcode to the corresponding LOAD*_MEMBASE IR opcode.
 * (Listing elided: most case labels are not shown.) */
10127 ldind_to_load_membase (int opcode)
10131 return OP_LOADI1_MEMBASE;
10133 return OP_LOADU1_MEMBASE;
10135 return OP_LOADI2_MEMBASE;
10137 return OP_LOADU2_MEMBASE;
10139 return OP_LOADI4_MEMBASE;
10141 return OP_LOADU4_MEMBASE;
10143 return OP_LOAD_MEMBASE;
/* ldind.ref uses the pointer-sized load, same as ldind.i. */
10144 case CEE_LDIND_REF:
10145 return OP_LOAD_MEMBASE;
10147 return OP_LOADI8_MEMBASE;
10149 return OP_LOADR4_MEMBASE;
10151 return OP_LOADR8_MEMBASE;
10153 g_assert_not_reached ();
/* Map a CIL stind.* opcode to the corresponding STORE*_MEMBASE_REG IR opcode.
 * (Listing elided: most case labels are not shown.) */
10160 stind_to_store_membase (int opcode)
10164 return OP_STOREI1_MEMBASE_REG;
10166 return OP_STOREI2_MEMBASE_REG;
10168 return OP_STOREI4_MEMBASE_REG;
/* stind.ref uses the pointer-sized store. */
10170 case CEE_STIND_REF:
10171 return OP_STORE_MEMBASE_REG;
10173 return OP_STOREI8_MEMBASE_REG;
10175 return OP_STORER4_MEMBASE_REG;
10177 return OP_STORER8_MEMBASE_REG;
10179 g_assert_not_reached ();
/* Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM form,
 * only on targets that support memory-operand loads (x86/amd64). */
10186 mono_load_membase_to_load_mem (int opcode)
10188 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10189 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10191 case OP_LOAD_MEMBASE:
10192 return OP_LOAD_MEM;
10193 case OP_LOADU1_MEMBASE:
10194 return OP_LOADU1_MEM;
10195 case OP_LOADU2_MEMBASE:
10196 return OP_LOADU2_MEM;
10197 case OP_LOADI4_MEMBASE:
10198 return OP_LOADI4_MEM;
10199 case OP_LOADU4_MEMBASE:
10200 return OP_LOADU4_MEM;
/* 64-bit loads only exist on 64-bit targets. */
10201 #if SIZEOF_REGISTER == 8
10202 case OP_LOADI8_MEMBASE:
10203 return OP_LOADI8_MEM;
/* Given a store opcode and an ALU opcode whose destination is about to be
 * stored, return a fused read-modify-write *_MEMBASE opcode, or (per the
 * pattern of these helpers) a sentinel when no fusion is possible.
 * (Listing elided: case labels and fallthrough returns are not shown.) */
10214 op_to_op_dest_membase (int store_opcode, int opcode)
10215 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 32-bit stores can be fused. */
10216 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10220 return OP_X86_ADD_MEMBASE_REG;
10222 return OP_X86_SUB_MEMBASE_REG;
10224 return OP_X86_AND_MEMBASE_REG;
10226 return OP_X86_OR_MEMBASE_REG;
10228 return OP_X86_XOR_MEMBASE_REG;
10231 return OP_X86_ADD_MEMBASE_IMM;
10234 return OP_X86_SUB_MEMBASE_IMM;
10237 return OP_X86_AND_MEMBASE_IMM;
10240 return OP_X86_OR_MEMBASE_IMM;
10243 return OP_X86_XOR_MEMBASE_IMM;
10249 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64-bit stores. */
10250 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10255 return OP_X86_ADD_MEMBASE_REG;
10257 return OP_X86_SUB_MEMBASE_REG;
10259 return OP_X86_AND_MEMBASE_REG;
10261 return OP_X86_OR_MEMBASE_REG;
10263 return OP_X86_XOR_MEMBASE_REG;
10265 return OP_X86_ADD_MEMBASE_IMM;
10267 return OP_X86_SUB_MEMBASE_IMM;
10269 return OP_X86_AND_MEMBASE_IMM;
10271 return OP_X86_OR_MEMBASE_IMM;
10273 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants use the AMD64_* opcodes. */
10275 return OP_AMD64_ADD_MEMBASE_REG;
10277 return OP_AMD64_SUB_MEMBASE_REG;
10279 return OP_AMD64_AND_MEMBASE_REG;
10281 return OP_AMD64_OR_MEMBASE_REG;
10283 return OP_AMD64_XOR_MEMBASE_REG;
10286 return OP_AMD64_ADD_MEMBASE_IMM;
10289 return OP_AMD64_SUB_MEMBASE_IMM;
10292 return OP_AMD64_AND_MEMBASE_IMM;
10295 return OP_AMD64_OR_MEMBASE_IMM;
10298 return OP_AMD64_XOR_MEMBASE_IMM;
/* Fuse a setcc result directly into a byte store on x86/amd64
 * (setcc writes a single byte, hence the STOREI1 requirement). */
10308 op_to_op_store_membase (int store_opcode, int opcode)
10310 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10313 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10314 return OP_X86_SETEQ_MEMBASE;
10316 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10317 return OP_X86_SETNE_MEMBASE;
/* Fuse a load feeding sreg1 of an instruction into a memory-operand form
 * of that instruction (x86/amd64). (Listing elided: case labels not shown.) */
10325 op_to_op_src1_membase (int load_opcode, int opcode)
10328 /* FIXME: This has sign extension issues */
10330 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10331 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads are fusable. */
10334 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10339 return OP_X86_PUSH_MEMBASE;
10340 case OP_COMPARE_IMM:
10341 case OP_ICOMPARE_IMM:
10342 return OP_X86_COMPARE_MEMBASE_IMM;
10345 return OP_X86_COMPARE_MEMBASE_REG;
10349 #ifdef TARGET_AMD64
10350 /* FIXME: This has sign extension issues */
10352 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10353 return OP_X86_COMPARE_MEMBASE8_IMM;
10358 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10359 return OP_X86_PUSH_MEMBASE;
/* The immediate-compare fusion below is commented out upstream (32-bit imm limit). */
10361 /* FIXME: This only works for 32 bit immediates
10362 case OP_COMPARE_IMM:
10363 case OP_LCOMPARE_IMM:
10364 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10365 return OP_AMD64_COMPARE_MEMBASE_IMM;
10367 case OP_ICOMPARE_IMM:
10368 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10369 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10373 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10374 return OP_AMD64_COMPARE_MEMBASE_REG;
10377 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10378 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/* Fuse a load feeding sreg2 of an instruction into a reg-mem form of that
 * instruction (x86/amd64). (Listing elided: case labels not shown.) */
10387 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads are fusable. */
10390 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10396 return OP_X86_COMPARE_REG_MEMBASE;
10398 return OP_X86_ADD_REG_MEMBASE;
10400 return OP_X86_SUB_REG_MEMBASE;
10402 return OP_X86_AND_REG_MEMBASE;
10404 return OP_X86_OR_REG_MEMBASE;
10406 return OP_X86_XOR_REG_MEMBASE;
10410 #ifdef TARGET_AMD64
/* amd64: pick the 32-bit (X86_*) or 64-bit (AMD64_*) reg-mem form per load width. */
10413 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10414 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10418 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10419 return OP_AMD64_COMPARE_REG_MEMBASE;
10422 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10423 return OP_X86_ADD_REG_MEMBASE;
10425 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10426 return OP_X86_SUB_REG_MEMBASE;
10428 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10429 return OP_X86_AND_REG_MEMBASE;
10431 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10432 return OP_X86_OR_REG_MEMBASE;
10434 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10435 return OP_X86_XOR_REG_MEMBASE;
10437 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10438 return OP_AMD64_ADD_REG_MEMBASE;
10440 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10441 return OP_AMD64_SUB_REG_MEMBASE;
10443 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10444 return OP_AMD64_AND_REG_MEMBASE;
10446 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10447 return OP_AMD64_OR_REG_MEMBASE;
10449 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10450 return OP_AMD64_XOR_REG_MEMBASE;
/* Like mono_op_to_op_imm, but refuses (falls through, per the elided cases)
 * opcodes that are software-emulated on this target, since those cannot
 * take an immediate form. */
10458 mono_op_to_op_imm_noemul (int opcode)
10461 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10467 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10475 return mono_op_to_op_imm (opcode);
10479 #ifndef DISABLE_JIT
/* (Listing elided: some original lines are missing between numbered lines.) */
10482 * mono_handle_global_vregs:
10484 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10488 mono_handle_global_vregs (MonoCompile *cfg)
10490 gint32 *vreg_to_bb;
10491 MonoBasicBlock *bb;
/* vreg_to_bb[vreg]: 0 = unseen, block_num+1 = seen in one bb, -1 = seen in several.
 * NOTE(review): allocation uses sizeof (gint32*) for a gint32 array — harmless
 * over-allocation on 64-bit, but looks like it was meant to be sizeof (gint32). */
10494 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10496 #ifdef MONO_ARCH_SIMD_INTRINSICS
10497 if (cfg->uses_simd_intrinsics)
10498 mono_simd_simplify_indirection (cfg);
10501 /* Find local vregs used in more than one bb */
10502 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10503 MonoInst *ins = bb->code;
10504 int block_num = bb->block_num;
10506 if (cfg->verbose_level > 2)
10507 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10510 for (; ins; ins = ins->next) {
10511 const char *spec = INS_INFO (ins->opcode);
10512 int regtype = 0, regindex;
10515 if (G_UNLIKELY (cfg->verbose_level > 2))
10516 mono_print_ins (ins);
10518 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of the instruction in turn. */
10520 for (regindex = 0; regindex < 4; regindex ++) {
10523 if (regindex == 0) {
10524 regtype = spec [MONO_INST_DEST];
10525 if (regtype == ' ')
10528 } else if (regindex == 1) {
10529 regtype = spec [MONO_INST_SRC1];
10530 if (regtype == ' ')
10533 } else if (regindex == 2) {
10534 regtype = spec [MONO_INST_SRC2];
10535 if (regtype == ' ')
10538 } else if (regindex == 3) {
10539 regtype = spec [MONO_INST_SRC3];
10540 if (regtype == ' ')
10545 #if SIZEOF_REGISTER == 4
10546 /* In the LLVM case, the long opcodes are not decomposed */
10547 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10549 * Since some instructions reference the original long vreg,
10550 * and some reference the two component vregs, it is quite hard
10551 * to determine when it needs to be global. So be conservative.
10553 if (!get_vreg_to_inst (cfg, vreg)) {
10554 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10556 if (cfg->verbose_level > 2)
10557 printf ("LONG VREG R%d made global.\n", vreg);
10561 * Make the component vregs volatile since the optimizations can
10562 * get confused otherwise.
10564 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10565 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10569 g_assert (vreg != -1);
10571 prev_bb = vreg_to_bb [vreg];
10572 if (prev_bb == 0) {
10573 /* 0 is a valid block num */
10574 vreg_to_bb [vreg] = block_num + 1;
10575 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them. */
10576 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10579 if (!get_vreg_to_inst (cfg, vreg)) {
10580 if (G_UNLIKELY (cfg->verbose_level > 2))
10581 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create an OP_LOCAL variable of the appropriate type per regtype. */
10585 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10588 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10591 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10594 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10597 g_assert_not_reached ();
10601 /* Flag as having been used in more than one bb */
10602 vreg_to_bb [vreg] = -1;
10608 /* If a variable is used in only one bblock, convert it into a local vreg */
10609 for (i = 0; i < cfg->num_varinfo; i++) {
10610 MonoInst *var = cfg->varinfo [i];
10611 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10613 switch (var->type) {
10619 #if SIZEOF_REGISTER == 8
10622 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10623 /* Enabling this screws up the fp stack on x86 */
10626 /* Arguments are implicitly global */
10627 /* Putting R4 vars into registers doesn't work currently */
10628 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10630 * Make that the variable's liveness interval doesn't contain a call, since
10631 * that would cause the lvreg to be spilled, making the whole optimization
10634 /* This is too slow for JIT compilation */
10636 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10638 int def_index, call_index, ins_index;
10639 gboolean spilled = FALSE;
10644 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10645 const char *spec = INS_INFO (ins->opcode);
10647 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10648 def_index = ins_index;
/* NOTE(review): both sides of this || test SRC1/sreg1 — the second leg was
 * presumably meant to test SRC2/sreg2; verify against upstream. */
10650 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10651 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10652 if (call_index > def_index) {
10658 if (MONO_IS_CALL (ins))
10659 call_index = ins_index;
10669 if (G_UNLIKELY (cfg->verbose_level > 2))
10670 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10671 var->flags |= MONO_INST_IS_DEAD;
10672 cfg->vreg_to_inst [var->dreg] = NULL;
10679 * Compress the varinfo and vars tables so the liveness computation is faster and
10680 * takes up less space.
/* Compact out entries flagged MONO_INST_IS_DEAD, fixing up indices. */
10683 for (i = 0; i < cfg->num_varinfo; ++i) {
10684 MonoInst *var = cfg->varinfo [i];
10685 if (pos < i && cfg->locals_start == i)
10686 cfg->locals_start = pos;
10687 if (!(var->flags & MONO_INST_IS_DEAD)) {
10689 cfg->varinfo [pos] = cfg->varinfo [i];
10690 cfg->varinfo [pos]->inst_c0 = pos;
10691 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10692 cfg->vars [pos].idx = pos;
10693 #if SIZEOF_REGISTER == 4
10694 if (cfg->varinfo [pos]->type == STACK_I8) {
10695 /* Modify the two component vars too */
10698 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10699 var1->inst_c0 = pos;
10700 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10701 var1->inst_c0 = pos;
10708 cfg->num_varinfo = pos;
10709 if (cfg->locals_start > cfg->num_varinfo)
10710 cfg->locals_start = cfg->num_varinfo;
/* (Listing elided and truncated: lines missing between numbered lines, and the
 * function continues past the end of this view.) */
10714 * mono_spill_global_vars:
10716 * Generate spill code for variables which are not allocated to registers,
10717 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10718 * code is generated which could be optimized by the local optimization passes.
10721 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10723 MonoBasicBlock *bb;
10725 int orig_next_vreg;
10726 guint32 *vreg_to_lvreg;
10728 guint32 i, lvregs_len;
10729 gboolean dest_has_lvreg = FALSE;
10730 guint32 stacktypes [128];
10731 MonoInst **live_range_start, **live_range_end;
10732 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10734 *need_local_opts = FALSE;
10736 memset (spec2, 0, sizeof (spec2));
10738 /* FIXME: Move this function to mini.c */
/* Map regtype spec characters to stack types for alloc_dreg. */
10739 stacktypes ['i'] = STACK_PTR;
10740 stacktypes ['l'] = STACK_I8;
10741 stacktypes ['f'] = STACK_R8;
10742 #ifdef MONO_ARCH_SIMD_INTRINSICS
10743 stacktypes ['x'] = STACK_VTYPE;
10746 #if SIZEOF_REGISTER == 4
10747 /* Create MonoInsts for longs */
/* On 32-bit targets each long var gets two component REGOFFSET vars at
 * dreg+1 / dreg+2, offset by the LS/MS word offsets. */
10748 for (i = 0; i < cfg->num_varinfo; i++) {
10749 MonoInst *ins = cfg->varinfo [i];
10751 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10752 switch (ins->type) {
10757 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10760 g_assert (ins->opcode == OP_REGOFFSET);
10762 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10764 tree->opcode = OP_REGOFFSET;
10765 tree->inst_basereg = ins->inst_basereg;
10766 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10768 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10770 tree->opcode = OP_REGOFFSET;
10771 tree->inst_basereg = ins->inst_basereg;
10772 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10782 /* FIXME: widening and truncation */
10785 * As an optimization, when a variable allocated to the stack is first loaded into
10786 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10787 * the variable again.
10789 orig_next_vreg = cfg->next_vreg;
10790 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10791 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10795 * These arrays contain the first and last instructions accessing a given
10797 * Since we emit bblocks in the same order we process them here, and we
10798 * don't split live ranges, these will precisely describe the live range of
10799 * the variable, i.e. the instruction range where a valid value can be found
10800 * in the variables location.
10801 * The live range is computed using the liveness info computed by the liveness pass.
10802 * We can't use vmv->range, since that is an abstract live range, and we need
10803 * one which is instruction precise.
10804 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10806 /* FIXME: Only do this if debugging info is requested */
10807 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10808 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10809 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10810 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10812 /* Add spill loads/stores */
10813 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10816 if (cfg->verbose_level > 2)
10817 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10819 /* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock. */
10820 for (i = 0; i < lvregs_len; i++)
10821 vreg_to_lvreg [lvregs [i]] = 0;
10825 MONO_BB_FOR_EACH_INS (bb, ins) {
10826 const char *spec = INS_INFO (ins->opcode);
10827 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10828 gboolean store, no_lvreg;
10829 int sregs [MONO_MAX_SRC_REGS];
10831 if (G_UNLIKELY (cfg->verbose_level > 2))
10832 mono_print_ins (ins);
10834 if (ins->opcode == OP_NOP)
10838 * We handle LDADDR here as well, since it can only be decomposed
10839 * when variable addresses are known.
10841 if (ins->opcode == OP_LDADDR) {
10842 MonoInst *var = ins->inst_p0;
10844 if (var->opcode == OP_VTARG_ADDR) {
10845 /* Happens on SPARC/S390 where vtypes are passed by reference */
10846 MonoInst *vtaddr = var->inst_left;
10847 if (vtaddr->opcode == OP_REGVAR) {
10848 ins->opcode = OP_MOVE;
10849 ins->sreg1 = vtaddr->dreg;
10851 else if (var->inst_left->opcode == OP_REGOFFSET) {
10852 ins->opcode = OP_LOAD_MEMBASE;
10853 ins->inst_basereg = vtaddr->inst_basereg;
10854 ins->inst_offset = vtaddr->inst_offset;
/* Plain variable: its address is basereg + offset. */
10858 g_assert (var->opcode == OP_REGOFFSET);
10860 ins->opcode = OP_ADD_IMM;
10861 ins->sreg1 = var->inst_basereg;
10862 ins->inst_imm = var->inst_offset;
10865 *need_local_opts = TRUE;
10866 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes should have been lowered by now. */
10869 if (ins->opcode < MONO_CEE_LAST) {
10870 mono_print_ins (ins);
10871 g_assert_not_reached ();
10875 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 for STORE_MEMBASE so the generic dreg/sreg
 * processing below sees the base register as a source, not a destination. */
10879 if (MONO_IS_STORE_MEMBASE (ins)) {
10880 tmp_reg = ins->dreg;
10881 ins->dreg = ins->sreg2;
10882 ins->sreg2 = tmp_reg;
10885 spec2 [MONO_INST_DEST] = ' ';
10886 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10887 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10888 spec2 [MONO_INST_SRC3] = ' ';
10890 } else if (MONO_IS_STORE_MEMINDEX (ins))
10891 g_assert_not_reached ();
10896 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10897 printf ("\t %.3s %d", spec, ins->dreg);
10898 num_sregs = mono_inst_get_src_registers (ins, sregs);
10899 for (srcindex = 0; srcindex < 3; ++srcindex)
10900 printf (" %d", sregs [srcindex]);
/* ---- DREG handling: spill the destination to its variable slot. ---- */
10907 regtype = spec [MONO_INST_DEST];
10908 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10911 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10912 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10913 MonoInst *store_ins;
10915 MonoInst *def_ins = ins;
10916 int dreg = ins->dreg; /* The original vreg */
10918 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10920 if (var->opcode == OP_REGVAR) {
10921 ins->dreg = var->dreg;
10922 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10924 * Instead of emitting a load+store, use a _membase opcode.
10926 g_assert (var->opcode == OP_REGOFFSET);
10927 if (ins->opcode == OP_MOVE) {
10931 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10932 ins->inst_basereg = var->inst_basereg;
10933 ins->inst_offset = var->inst_offset;
10936 spec = INS_INFO (ins->opcode);
10940 g_assert (var->opcode == OP_REGOFFSET);
10942 prev_dreg = ins->dreg;
10944 /* Invalidate any previous lvreg for this vreg */
10945 vreg_to_lvreg [ins->dreg] = 0;
/* Soft-float: an R8 store becomes an I8 store of the raw bits. */
10949 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10951 store_opcode = OP_STOREI8_MEMBASE_REG;
10954 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Longs on 32-bit targets store both component words. */
10956 if (regtype == 'l') {
10957 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10958 mono_bblock_insert_after_ins (bb, ins, store_ins);
10959 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10960 mono_bblock_insert_after_ins (bb, ins, store_ins);
10961 def_ins = store_ins;
10964 g_assert (store_opcode != OP_STOREV_MEMBASE);
10966 /* Try to fuse the store into the instruction itself */
10967 /* FIXME: Add more instructions */
10968 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10969 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10970 ins->inst_imm = ins->inst_c0;
10971 ins->inst_destbasereg = var->inst_basereg;
10972 ins->inst_offset = var->inst_offset;
10973 spec = INS_INFO (ins->opcode);
10974 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10975 ins->opcode = store_opcode;
10976 ins->inst_destbasereg = var->inst_basereg;
10977 ins->inst_offset = var->inst_offset;
/* Re-apply the STORE_MEMBASE dreg/sreg2 swap for the converted ins. */
10981 tmp_reg = ins->dreg;
10982 ins->dreg = ins->sreg2;
10983 ins->sreg2 = tmp_reg;
10986 spec2 [MONO_INST_DEST] = ' ';
10987 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10988 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10989 spec2 [MONO_INST_SRC3] = ' ';
10991 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10992 // FIXME: The backends expect the base reg to be in inst_basereg
10993 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10995 ins->inst_basereg = var->inst_basereg;
10996 ins->inst_offset = var->inst_offset;
10997 spec = INS_INFO (ins->opcode);
10999 /* printf ("INS: "); mono_print_ins (ins); */
11000 /* Create a store instruction */
11001 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11003 /* Insert it after the instruction */
11004 mono_bblock_insert_after_ins (bb, ins, store_ins);
11006 def_ins = store_ins;
11009 * We can't assign ins->dreg to var->dreg here, since the
11010 * sregs could use it. So set a flag, and do it after
11013 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11014 dest_has_lvreg = TRUE;
11019 if (def_ins && !live_range_start [dreg]) {
11020 live_range_start [dreg] = def_ins;
11021 live_range_start_bb [dreg] = bb;
11028 num_sregs = mono_inst_get_src_registers (ins, sregs);
11029 for (srcindex = 0; srcindex < 3; ++srcindex) {
11030 regtype = spec [MONO_INST_SRC1 + srcindex];
11031 sreg = sregs [srcindex];
11033 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11034 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11035 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11036 MonoInst *use_ins = ins;
11037 MonoInst *load_ins;
11038 guint32 load_opcode;
11040 if (var->opcode == OP_REGVAR) {
11041 sregs [srcindex] = var->dreg;
11042 //mono_inst_set_src_registers (ins, sregs);
11043 live_range_end [sreg] = use_ins;
11044 live_range_end_bb [sreg] = bb;
11048 g_assert (var->opcode == OP_REGOFFSET);
11050 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11052 g_assert (load_opcode != OP_LOADV_MEMBASE);
11054 if (vreg_to_lvreg [sreg]) {
11055 g_assert (vreg_to_lvreg [sreg] != -1);
11057 /* The variable is already loaded to an lvreg */
11058 if (G_UNLIKELY (cfg->verbose_level > 2))
11059 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11060 sregs [srcindex] = vreg_to_lvreg [sreg];
11061 //mono_inst_set_src_registers (ins, sregs);
11065 /* Try to fuse the load into the instruction */
11066 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11067 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11068 sregs [0] = var->inst_basereg;
11069 //mono_inst_set_src_registers (ins, sregs);
11070 ins->inst_offset = var->inst_offset;
11071 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11072 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11073 sregs [1] = var->inst_basereg;
11074 //mono_inst_set_src_registers (ins, sregs);
11075 ins->inst_offset = var->inst_offset;
11077 if (MONO_IS_REAL_MOVE (ins)) {
11078 ins->opcode = OP_NOP;
11081 //printf ("%d ", srcindex); mono_print_ins (ins);
11083 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11085 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11086 if (var->dreg == prev_dreg) {
11088 * sreg refers to the value loaded by the load
11089 * emitted below, but we need to use ins->dreg
11090 * since it refers to the store emitted earlier.
11094 g_assert (sreg != -1);
11095 vreg_to_lvreg [var->dreg] = sreg;
11096 g_assert (lvregs_len < 1024);
11097 lvregs [lvregs_len ++] = var->dreg;
11101 sregs [srcindex] = sreg;
11102 //mono_inst_set_src_registers (ins, sregs);
11104 if (regtype == 'l') {
11105 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11106 mono_bblock_insert_before_ins (bb, ins, load_ins);
11107 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11108 mono_bblock_insert_before_ins (bb, ins, load_ins);
11109 use_ins = load_ins;
11112 #if SIZEOF_REGISTER == 4
11113 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11115 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11116 mono_bblock_insert_before_ins (bb, ins, load_ins);
11117 use_ins = load_ins;
11121 if (var->dreg < orig_next_vreg) {
11122 live_range_end [var->dreg] = use_ins;
11123 live_range_end_bb [var->dreg] = bb;
11127 mono_inst_set_src_registers (ins, sregs);
11129 if (dest_has_lvreg) {
11130 g_assert (ins->dreg != -1);
11131 vreg_to_lvreg [prev_dreg] = ins->dreg;
11132 g_assert (lvregs_len < 1024);
11133 lvregs [lvregs_len ++] = prev_dreg;
11134 dest_has_lvreg = FALSE;
11138 tmp_reg = ins->dreg;
11139 ins->dreg = ins->sreg2;
11140 ins->sreg2 = tmp_reg;
11143 if (MONO_IS_CALL (ins)) {
11144 /* Clear vreg_to_lvreg array */
11145 for (i = 0; i < lvregs_len; i++)
11146 vreg_to_lvreg [lvregs [i]] = 0;
11148 } else if (ins->opcode == OP_NOP) {
11150 MONO_INST_NULLIFY_SREGS (ins);
11153 if (cfg->verbose_level > 2)
11154 mono_print_ins_index (1, ins);
11157 /* Extend the live range based on the liveness info */
11158 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11159 for (i = 0; i < cfg->num_varinfo; i ++) {
11160 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11162 if (vreg_is_volatile (cfg, vi->vreg))
11163 /* The liveness info is incomplete */
11166 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11167 /* Live from at least the first ins of this bb */
11168 live_range_start [vi->vreg] = bb->code;
11169 live_range_start_bb [vi->vreg] = bb;
11172 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11173 /* Live at least until the last ins of this bb */
11174 live_range_end [vi->vreg] = bb->last_ins;
11175 live_range_end_bb [vi->vreg] = bb;
11181 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11183 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11184 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11186 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11187 for (i = 0; i < cfg->num_varinfo; ++i) {
11188 int vreg = MONO_VARINFO (cfg, i)->vreg;
11191 if (live_range_start [vreg]) {
11192 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11194 ins->inst_c1 = vreg;
11195 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11197 if (live_range_end [vreg]) {
11198 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11200 ins->inst_c1 = vreg;
11201 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11202 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11204 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11210 g_free (live_range_start);
11211 g_free (live_range_end);
11212 g_free (live_range_start_bb);
11213 g_free (live_range_end_bb);
11218 * - use 'iadd' instead of 'int_add'
11219 * - handling ovf opcodes: decompose in method_to_ir.
11220 * - unify iregs/fregs
11221 * -> partly done, the missing parts are:
11222 * - a more complete unification would involve unifying the hregs as well, so
11223 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11224 * would no longer map to the machine hregs, so the code generators would need to
11225 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11226 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11227 * fp/non-fp branches speeds it up by about 15%.
11228 * - use sext/zext opcodes instead of shifts
11230 * - get rid of TEMPLOADs if possible and use vregs instead
11231 * - clean up usage of OP_P/OP_ opcodes
11232 * - cleanup usage of DUMMY_USE
11233 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11235 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11236 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11237 * - make sure handle_stack_args () is called before the branch is emitted
11238 * - when the new IR is done, get rid of all unused stuff
11239 * - COMPARE/BEQ as separate instructions or unify them ?
11240 * - keeping them separate allows specialized compare instructions like
11241 * compare_imm, compare_membase
11242 * - most back ends unify fp compare+branch, fp compare+ceq
11243 * - integrate mono_save_args into inline_method
11244 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11245 * - handle long shift opts on 32 bit platforms somehow: they require
11246 * 3 sregs (2 for arg1 and 1 for arg2)
11247 * - make byref a 'normal' type.
11248 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11249 * variable if needed.
11250 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11251 * like inline_method.
11252 * - remove inlining restrictions
11253 * - fix LNEG and enable cfold of INEG
11254 * - generalize x86 optimizations like ldelema as a peephole optimization
11255 * - add store_mem_imm for amd64
11256 * - optimize the loading of the interruption flag in the managed->native wrappers
11257 * - avoid special handling of OP_NOP in passes
11258 * - move code inserting instructions into one function/macro.
11259 * - try a coalescing phase after liveness analysis
11260 * - add float -> vreg conversion + local optimizations on !x86
11261 * - figure out how to handle decomposed branches during optimizations, ie.
11262 * compare+branch, op_jump_table+op_br etc.
11263 * - promote RuntimeXHandles to vregs
11264 * - vtype cleanups:
11265 * - add a NEW_VARLOADA_VREG macro
11266 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11267 * accessing vtype fields.
11268 * - get rid of I8CONST on 64 bit platforms
11269 * - dealing with the increase in code size due to branches created during opcode
11271 * - use extended basic blocks
11272 * - all parts of the JIT
11273 * - handle_global_vregs () && local regalloc
11274 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11275 * - sources of increase in code size:
11278 * - isinst and castclass
11279 * - lvregs not allocated to global registers even if used multiple times
11280 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11282 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11283 * - add all micro optimizations from the old JIT
11284 * - put tree optimizations into the deadce pass
11285 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11286 * specific function.
11287 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11288 * fcompare + branchCC.
11289 * - create a helper function for allocating a stack slot, taking into account
11290 * MONO_CFG_HAS_SPILLUP.
11292 * - merge the ia64 switch changes.
11293 * - optimize mono_regstate2_alloc_int/float.
11294 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11295 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11296 * parts of the tree could be separated by other instructions, killing the tree
11297 * arguments, or stores killing loads etc. Also, should we fold loads into other
11298 * instructions if the result of the load is used multiple times ?
11299 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11300 * - LAST MERGE: 108395.
11301 * - when returning vtypes in registers, generate IR and append it to the end of the
11302 * last bb instead of doing it in the epilog.
11303 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11311 - When to decompose opcodes:
11312 - earlier: this makes some optimizations hard to implement, since the low level IR
11313 no longer contains the necessary information. But it is easier to do.
11314 - later: harder to implement, enables more optimizations.
11315 - Branches inside bblocks:
11316 - created when decomposing complex opcodes.
11317 - branches to another bblock: harmless, but not tracked by the branch
11318 optimizations, so need to branch to a label at the start of the bblock.
11319 - branches to inside the same bblock: very problematic, trips up the local
11320 reg allocator. Can be fixed by splitting the current bblock, but that is a
11321 complex operation, since some local vregs can become global vregs etc.
11322 - Local/global vregs:
11323 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11324 local register allocator.
11325 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11326 structure, created by mono_create_var (). Assigned to hregs or the stack by
11327 the global register allocator.
11328 - When to do optimizations like alu->alu_imm:
11329 - earlier -> saves work later on since the IR will be smaller/simpler
11330 - later -> can work on more instructions
11331 - Handling of valuetypes:
11332 - When a vtype is pushed on the stack, a new temporary is created, an
11333 instruction computing its address (LDADDR) is emitted and pushed on
11334 the stack. Need to optimize cases when the vtype is used immediately as in
11335 argument passing, stloc etc.
11336 - Instead of the to_end stuff in the old JIT, simply call the function handling
11337 the values on the stack before emitting the last instruction of the bb.
11340 #endif /* DISABLE_JIT */