2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* Inliner tuning constants.
 * NOTE(review): exact units are arbitrary heuristic weights; BRANCH_COST
 * presumably penalizes branches and INLINE_LENGTH_LIMIT presumably caps the
 * IL size of inline candidates — confirm against the inlining code, which is
 * not visible in this chunk. */
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
/* True when 'ins' is a plain register move (OP_MOVE) whose source register is
 * the dreg of argument 0 and the current method has an implicit 'this' — i.e.
 * 'ins' loads the 'this' argument. Relies on a 'cfg' variable being in scope
 * at the expansion site. */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
/* Expand each MINI_OP/MINI_OP3 entry from mini-ops.h into its
 * dest/src1/src2(/src3) register-spec characters; two-source ops emit a
 * trailing ' ' so every opcode occupies the same number of slots in the
 * generated table. */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Re-expansion of the mini-ops.h entries: each opcode yields the index of its
 * last non-NONE source register plus one (0-3). Per the file's note, this is
 * the "index of the last sreg + 1", which is not necessarily the same as the
 * number of sregs for every opcode. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* Handle unverifiable IL: trap into the debugger when the
 * 'break_on_unverified' debug option is set, otherwise jump to the enclosing
 * function's 'unverified' label (the label must exist at the expansion site). */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * Links two basic blocks in the control flow graph: the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * that control flow reaches after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 * Load into 'intf_reg' the per-class interface offset for 'klass', read from
 * the interface_offsets table stored immediately before the vtable pointed to
 * by 'vtable_reg'.  NOTE(review): interior lines elided in this view.
 */
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: the interface id is unknown at compile time, so load it as a patchable
 * constant (ADJUSTED_IID) and add it to the vtable pointer before loading. */
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: fold the id into a negative displacement from the vtable pointer. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit code which tests the interface bitmap found at base_reg + offset and
 * leaves a nonzero value in 'intf_bit_reg' iff the bit for klass's interface
 * id is set.  Two encodings exist: a compressed bitmap (checked via a JIT
 * icall) and a plain byte-array bitmap (checked with loads and masks).
 * NOTE(review): interior lines elided in this view.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
/* The compressed format cannot be indexed inline; delegate the lookup to the
 * mono_class_interface_match icall, with an AOT-patchable iid if needed. */
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at
 * runtime, since the interface id is only a patchable constant. */
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: performs the interface-bitmap test using the bitmap embedded
 * in a MonoClass (offset of MonoClass.interface_bitmap). */
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: performs the interface-bitmap test using the bitmap embedded
 * in a MonoVTable (offset of MonoVTable.interface_bitmap). */
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is smaller
1439 * than the value given by max_iid_reg.
/* If 'false_target' is non-NULL this emits a branch to it on failure
 * (isinst semantics); otherwise it throws InvalidCastException (castclass
 * semantics).  NOTE(review): interior lines elided in this view. */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
/* AOT: the iid must be a patchable constant, so compare reg-to-reg. */
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id (unsigned 16-bit) and delegates to
 * mini_emit_max_iid_check. */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id (unsigned 16-bit) and delegates to
 * mini_emit_max_iid_check. */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an 'isinst'-style subclass check for the class in 'klass_reg' against
 * 'klass' (or the runtime class in 'klass_ins' when shared generics are in
 * play), using the supertypes table: compare supertypes [klass->idepth - 1]
 * against the target class, branching to 'true_target' on match.
 * NOTE(review): interior lines elided in this view; the idepth guard is only
 * emitted when the depth exceeds the fixed-size supertable.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
/* Not deep enough in the hierarchy to possibly be a subclass. */
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to materialize the comparand: runtime inst, AOT constant,
 * or an immediate pointer to the MonoClass. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class instruction. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the vtable in 'vtable_reg': bound the
 * interface id by max_interface_id, then test the interface bitmap.  With a
 * 'true_target' this behaves as isinst (branch on success); without one it
 * throws InvalidCastException on failure.  NOTE(review): lines elided.
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same contract as mini_emit_iface_cast, but bound/bitmap data are read from
 * the MonoClass rather than the MonoVTable. NOTE(review): lines elided. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check of 'klass_reg' against 'klass' (or the
 * runtime class instruction 'klass_inst'), throwing InvalidCastException on
 * mismatch.  NOTE(review): interior lines elided in this view.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
/* AOT cannot embed raw MonoClass pointers; use a patchable class constant. */
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare 'klass_reg' against 'klass' and branch to 'target' using the given
 * branch opcode ('branch_op'), instead of throwing.  Uses a patchable class
 * constant under AOT.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a 'castclass' check of the class in 'klass_reg' against 'klass'
 * (or a runtime class from 'klass_inst'), throwing InvalidCastException on
 * failure.  Arrays are checked by rank + element class (with special cases
 * around enums and System.Object element types); non-arrays use the
 * supertypes table.  NOTE(review): interior lines elided in this view — the
 * branch structure around the array/interface/default cases is partly
 * invisible here.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
/* Array path: first require the same rank... */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ...then check the element (cast) class, with enum-related special cases. */
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
/* SZARRAY implies no bounds data; a multi-dim array has a non-NULL bounds. */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on mismatch. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no runtime class instruction. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline code that sets 'size' bytes at destreg + offset to 'val'
 * (currently asserted to be 0).  Small aligned sizes are done with a single
 * immediate store; larger sizes use a register source and unrolled stores,
 * widest-first.  NOTE(review): interior lines (loops/decrements between the
 * store groups) are elided in this view.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a size that fits in one naturally-aligned immediate store. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register sized to the machine word. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* Unaligned head: byte stores until aligned. This could be optimized further if necessary. */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* 64-bit: a 4-byte store to fix alignment, then 8-byte stores. */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline unrolled code copying 'size' bytes from srcreg + soffset to
 * destreg + doffset, honoring 'align'.  Copies widest-first (8/4/2/1 bytes)
 * via load/store pairs through a fresh register per chunk.  NOTE(review):
 * interior lines (loops/offset updates between chunk groups) are elided in
 * this view.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* Unaligned head handled with byte copies. This could be optimized further if necessary. */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy in 8-byte chunks first. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a call's return type to the IR call opcode family, selecting the
 * calli/virtual/plain variant.  Enums and generic instances are reduced to
 * their underlying types and re-dispatched.  NOTE(review): interior case
 * labels are elided in this view (the visible returns cover grouped cases).
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are machine pointers -> plain (integer) call opcodes. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve type variables under generic sharing before dispatching. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their base type and fall through to re-dispatch. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): interior lines (several 'return 1;' / 'return 0;' and case
 * labels) are elided in this view; the visible conditions encode which stack
 * types each CLI type accepts. */
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before switching on the fundamental type. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly (same MonoClass), not just by stack kind. */
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): interior lines ('return 1;' statements, some case labels)
 * are elided in this view. */
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* 'this' (when present) must be an object, managed pointer or raw pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and re-enter the switch. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to the corresponding direct-call opcode.
 * NOTE(review): most case labels/returns are elided in this view. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * used when the call target is loaded from memory (vtable/IMT slot). */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Materialize the hidden IMT/method argument for an interface call: either
 * the supplied 'imt_arg' value, an AOT method constant, or a raw method
 * pointer; then route it to the architecture's IMT register (or let the
 * backend place it when no MONO_ARCH_IMT_REG exists).
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg = alloc_preg (cfg);
2091 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2092 } else if (cfg->compile_aot) {
2093 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2096 MONO_INST_NEW (cfg, ins, OP_PCONST);
2097 ins->inst_p0 = call->method;
2098 ins->dreg = method_reg;
2099 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM tracks the register itself; native backends add it as an out-arg. */
2103 if (COMPILE_LLVM (cfg))
2104 call->imt_arg_reg = method_reg;
2106 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2108 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo (patch descriptor) from the given mempool.
 * NOTE(review): the field assignments for ip/type are elided in this view. */
2113 static MonoJumpInfo *
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2120 ji->data.target = target;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for a call with the given signature/arguments,
 * choosing the opcode from the return type (or OP_TAILCALL), setting up the
 * vret variable for struct returns, converting r4 args under soft-float,
 * and letting the backend (or LLVM) emit the out-arguments.
 * NOTE(review): interior lines elided in this view.
 */
2125 inline static MonoCallInst *
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns: either reuse cfg->vret_addr, or create a temp and pass
 * its address via OP_OUTARG_VTRETADDR (see the comment block below). */
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2185 MonoInst *in = call->args [i];
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
/* Hand the call to LLVM or the native backend to lay out out-args. */
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2211 mono_arch_emit_call (cfg, call);
2213 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing parameter area and mark the method as calling. */
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call ('calli') through the address in 'addr'. */
2222 inline static MonoInst*
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/* Attach the runtime-generic-context argument to a call: via the dedicated
 * RGCTX register when the architecture defines one, otherwise by recording
 * the vreg for the backend/LLVM to place. */
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2242 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also passes an rgctx argument; the rgctx value is moved
 * into a fresh vreg first so the move precedes the call sequence.
 * NOTE(review): conditional lines around rgctx_arg are elided in this view. */
2249 inline static MonoInst*
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Central helper to emit a managed method call.  Handles: string ctor return
 * type fixup, transparent-proxy/remoting wrappers, shared-generic remoting
 * via an rgctx-provided address, delegate Invoke fast path, devirtualization
 * of non-virtual/final methods, and true virtual dispatch through the vtable
 * or the IMT for interface methods.  NOTE(review): interior lines elided in
 * this view (several closing braces / else lines are not visible).
 */
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: possible when 'this' may be a transparent proxy (MBR class or
 * System.Object) and the call is non-virtual with an unproven 'this'. */
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
2297 g_assert (cfg->generic_sharing_context);
/* Shared generics can't use a remoting wrapper directly; fetch the
 * invoke-with-check address from the rgctx and call indirectly. */
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
/* Devirtualization: non-virtual target (or final non-remoting-wrapper). */
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with an implicit null check)
 * and call through a vtable or IMT slot. */
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2379 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable slots. */
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2387 if (slot_reg == -1) {
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT machinery. */
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but also passes a vtable/rgctx argument
 * (moved into a fresh vreg before the call is emitted).
 * NOTE(review): conditional lines around vtable_arg are elided in this view. */
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call using the method's own signature, no IMT arg. */
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to a native function address 'func' with signature
 * 'sig'.  NOTE(review): the lines assigning func into the call are elided. */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Emit a call to a registered JIT icall, resolving its wrapper + signature
 * from the function address. */
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is passed as the "address"; the ABS patch resolver
 * recognizes it via the abs_patches hash and patches the real target in. */
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * After a pinvoke (or under LLVM), widen a small-integer call result to full
 * register width, since native code may leave the upper bits uninitialized.
 * NOTE(review): interior lines (break statements, return of ins/widen) are
 * elided in this view.
 */
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the System.String::memcpy (3-argument) corlib helper, cached in a
 * static on first lookup. Aborts with g_error if the corlib lacks it.
 */
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
2544 #if HAVE_WRITE_BARRIERS
/*
 * create_write_barrier_bitmap:
 *
 *   Accumulate into WB_BITMAP one bit per pointer-sized slot (relative to
 * OFFSET) for every reference field of KLASS, recursing into value-type
 * fields that themselves contain references. Static fields are skipped.
 * Consumed by mono_emit_wb_aware_memcpy () to decide which copied slots
 * need write barriers.
 */
2547 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2549 MonoClassField *field;
2550 gpointer iter = NULL;
2552 while ((field = mono_class_get_fields (klass, &iter))) {
2555 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the boxed object header; strip it. */
2557 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2558 if (mono_type_is_reference (field->type)) {
2559 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2560 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2562 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2563 MonoClass *field_class = mono_class_from_mono_type (field->type);
2564 if (field_class->has_references)
2565 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of SIZE bytes between the
 * destination/source addresses held in iargs [0]/iargs [1], for valuetype
 * KLASS with alignment ALIGN. Requires pointer alignment and SIZE no larger
 * than 32 pointer slots (the bitmap limit); larger copies (> 5 slots) are
 * handed to the mono_gc_wbarrier_value_copy_bitmap icall instead of being
 * unrolled. Return-value semantics (success/failure) are decided by lines
 * not visible in this chunk — presumably gboolean, TRUE when the copy was
 * emitted; confirm against the full source.
 */
2571 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2573 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2574 unsigned need_wb = 0;
2579 /*types with references can't have alignment smaller than sizeof(void*) */
2580 if (align < SIZEOF_VOID_P)
2583 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2584 if (size > 32 * SIZEOF_VOID_P)
2587 create_write_barrier_bitmap (klass, &need_wb, 0);
2589 /* We don't unroll more than 5 stores to avoid code bloat. */
2590 if (size > 5 * SIZEOF_VOID_P) {
2591 /*FIXME this is a temporary fix while issues with valuetypes are solved.*/
2592 #if SIZEOF_VOID_P == 8
2595 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2596 size += (SIZEOF_VOID_P - 1);
2597 size &= ~(SIZEOF_VOID_P - 1);
2599 EMIT_NEW_ICONST (cfg, iargs [2], size);
2600 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2601 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2605 destreg = iargs [0]->dreg;
2606 srcreg = iargs [1]->dreg;
2609 dest_ptr_reg = alloc_preg (cfg);
2610 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks the destination; it is advanced one slot per iteration. */
2613 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2615 while (size >= SIZEOF_VOID_P) {
2616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Low bit of need_wb tells whether the slot just stored holds a reference. */
2619 if (need_wb & 0x1) {
2620 MonoInst *dummy_use;
2622 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2623 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg alive across the barrier call for the register allocator. */
2625 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2626 dummy_use->sreg1 = dest_ptr_reg;
2627 MONO_ADD_INS (cfg->cbb, dummy_use);
2631 offset += SIZEOF_VOID_P;
2632 size -= SIZEOF_VOID_P;
2635 /*tmp += sizeof (void*)*/
2636 if (size >= SIZEOF_VOID_P) {
2637 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2638 MONO_ADD_INS (cfg->cbb, iargs [0]);
2642 /* Those cannot be references since size < sizeof (void*) */
2644 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2652 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2658 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2669 * Emit code to copy a valuetype of type @klass whose address is stored in
2670 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects native (marshalled) layout/size instead of managed layout. */
2673 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2675 MonoInst *iargs [4];
2678 MonoMethod *memcpy_method;
2682 * This check breaks with spilled vars... need to handle it during verification anyway.
2683 * g_assert (klass && klass == src->klass && klass == dest->klass);
2687 n = mono_class_native_size (klass, &align);
2689 n = mono_class_value_size (klass, &align);
2691 #if HAVE_WRITE_BARRIERS
2692 /* if native is true there should be no references in the struct */
2693 if (klass->has_references && !native) {
2694 /* Avoid barriers when storing to the stack */
2695 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2696 (dest->opcode == OP_LDADDR))) {
2697 int context_used = 0;
2702 if (cfg->generic_sharing_context)
2703 context_used = mono_class_check_context_used (klass);
2705 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2706 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2708 } else if (context_used) {
/* Shared code: the class argument for mono_value_copy comes from the rgctx. */
2709 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2711 if (cfg->compile_aot) {
2712 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2714 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2715 mono_class_compute_gc_descriptor (klass);
2719 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Barrier-free path: small copies are inlined, larger ones call memcpy. */
2725 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2726 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2727 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2731 EMIT_NEW_ICONST (cfg, iargs [2], n);
2733 memcpy_method = get_memcpy_method ();
2734 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the System.String::memset (3-argument) corlib helper, cached in a
 * static on first lookup. Aborts with g_error if the corlib lacks it.
 */
2739 get_memset_method (void)
2741 static MonoMethod *memset_method = NULL;
2742 if (!memset_method) {
2743 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2745 g_error ("Old corlib found. Install a new one");
2747 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype KLASS at the address in
 * DEST->dreg: inlined stores for small sizes (<= 5 pointers), otherwise a
 * call to the corlib memset helper.
 */
2751 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2753 MonoInst *iargs [3];
2756 MonoMethod *memset_method;
2758 /* FIXME: Optimize this for the case when dest is an LDADDR */
2760 mono_class_init (klass);
2761 n = mono_class_value_size (klass, &align);
2763 if (n <= sizeof (gpointer) * 5) {
2764 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2767 memset_method = get_memset_method ();
2769 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2770 EMIT_NEW_ICONST (cfg, iargs [2], n);
2771 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD. The source
 * depends on how the method is shared: the hidden mrgctx variable when the
 * method's own generic context is used, the vtable variable for static /
 * valuetype methods (dereferenced to the class vtable when the method is
 * inflated with a method_inst), otherwise the vtable loaded from `this`.
 * Only valid when cfg->generic_sharing_context is set.
 */
2776 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2778 MonoInst *this = NULL;
2780 g_assert (cfg->generic_sharing_context);
2782 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2783 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2784 !method->klass->valuetype)
2785 EMIT_NEW_ARGLOAD (cfg, this, 0);
2787 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2788 MonoInst *mrgctx_loc, *mrgctx_var;
2791 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2793 mrgctx_loc = mono_get_vtable_var (cfg);
2794 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2797 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2798 MonoInst *vtable_loc, *vtable_var;
2802 vtable_loc = mono_get_vtable_var (cfg);
2803 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2805 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load its class vtable out of it. */
2806 MonoInst *mrgctx_var = vtable_var;
2809 vtable_reg = alloc_preg (cfg);
2810 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2811 vtable_var->type = STACK_PTR;
2817 int vtable_reg, res_reg;
2819 vtable_reg = alloc_preg (cfg);
2820 res_reg = alloc_preg (cfg);
/* Instance method: the rgctx source is this->vtable. */
2821 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) a MonoJumpInfoRgctxEntry describing a lazy rgctx
 * fetch: the requesting METHOD, whether the entry lives in an mrgctx, the
 * patch (PATCH_TYPE/PATCH_DATA) identifying the wanted item, and INFO_TYPE
 * selecting which property of it to fetch.
 */
static MonoJumpInfoRgctxEntry *
2827 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2829 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2830 res->method = method;
2831 res->in_mrgctx = in_mrgctx;
2832 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2833 res->data->type = patch_type;
2834 res->data->data.target = patch_data;
2835 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * using the runtime generic context value RGCTX.
 */
static inline MonoInst*
2841 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2843 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
2847 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2848 MonoClass *klass, int rgctx_type)
2850 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2851 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2853 return emit_rgctx_fetch (cfg, rgctx, entry);
2857 * emit_get_rgctx_method:
2859 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2860 * normal constants, else emit a load from the rgctx.
2863 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2864 MonoMethod *cmethod, int rgctx_type)
2866 if (!context_used) {
2869 switch (rgctx_type) {
2870 case MONO_RGCTX_INFO_METHOD:
2871 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2873 case MONO_RGCTX_INFO_METHOD_RGCTX:
2874 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two rgctx_type values above are supported in the non-shared path. */
2877 g_assert_not_reached ();
2880 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2881 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2883 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
2888 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2889 MonoClassField *field, int rgctx_type)
2891 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2892 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2894 return emit_rgctx_fetch (cfg, rgctx, entry);
2898 * On return the caller must check @klass for load errors.
/* Emit a call to the generic class init trampoline for KLASS. The vtable
 * argument comes from the rgctx under generic sharing, otherwise from a
 * vtable constant; on architectures with MONO_ARCH_VTABLE_REG it is passed
 * in that fixed register. */
2901 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2903 MonoInst *vtable_arg;
2905 int context_used = 0;
2907 if (cfg->generic_sharing_context)
2908 context_used = mono_class_check_context_used (klass);
2911 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2912 klass, MONO_RGCTX_INFO_VTABLE);
2914 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2918 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a distinct trampoline signature for this call. */
2921 if (COMPILE_LLVM (cfg))
2922 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2924 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2925 #ifdef MONO_ARCH_VTABLE_REG
2926 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2927 cfg->uses_vtable_reg = TRUE;
2934 * On return the caller must check @array_class for load errors
/* Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise. The comparison is done on
 * the class under MONO_OPT_SHARED, on the vtable otherwise; the vtable to
 * compare against comes from the rgctx under generic sharing and from a
 * constant (register-materialized when AOT compiling) otherwise. */
2937 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2939 int vtable_reg = alloc_preg (cfg);
2940 int context_used = 0;
2942 if (cfg->generic_sharing_context)
2943 context_used = mono_class_check_context_used (array_class);
2945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2947 if (cfg->opt & MONO_OPT_SHARED) {
2948 int class_reg = alloc_preg (cfg);
2949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2950 if (cfg->compile_aot) {
2951 int klass_reg = alloc_preg (cfg);
2952 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2953 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2957 } else if (context_used) {
2958 MonoInst *vtable_ins;
2960 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2961 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2963 if (cfg->compile_aot) {
2967 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2969 vt_reg = alloc_preg (cfg);
2970 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2971 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2974 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2980 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code recording the source class of
 * the object in OBJ_REG and the target KLASS into the thread's MonoJitTlsData
 * (class_cast_from/class_cast_to), so a failing cast can produce a detailed
 * message. No-op when better_cast_details is off.
 */
2984 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2986 if (mini_get_debug_options ()->better_cast_details) {
2987 int to_klass_reg = alloc_preg (cfg);
2988 int vtable_reg = alloc_preg (cfg);
2989 int klass_reg = alloc_preg (cfg);
2990 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* tls_get is NULL when the platform has no TLS intrinsic support. */
2993 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2997 MONO_ADD_INS (cfg->cbb, tls_get);
2998 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2999 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3001 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3002 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3003 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast details in
 * the thread's MonoJitTlsData after a cast has succeeded.
 */
3008 reset_cast_details (MonoCompile *cfg)
3010 /* Reset the variables holding the cast details */
3011 if (mini_get_debug_options ()->better_cast_details) {
3012 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3014 MONO_ADD_INS (cfg->cbb, tls_get);
3015 /* It is enough to reset the from field */
3016 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3021 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3022 * generic code is generated.
3025 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3027 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3030 MonoInst *rgctx, *addr;
3032 /* FIXME: What if the class is shared? We might not
3033 have to get the address of the method from the
/* Shared path: fetch the Unbox method's code address from the rgctx and
 * emit an indirect (calli) call with the rgctx argument. */
3035 addr = emit_get_rgctx_method (cfg, context_used, method,
3036 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3038 rgctx = emit_get_rgctx (cfg, method, context_used);
3040 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3042 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp [0] to valuetype KLASS: check the object's vtable
 * rank is 0 and its element class matches KLASS's element class (throwing
 * InvalidCastException otherwise), then compute the address of the unboxed
 * payload (object address + sizeof (MonoObject)). Under generic sharing
 * (CONTEXT_USED non-zero) the element class to compare against is fetched
 * from the rgctx.
 */
3047 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3051 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3052 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3053 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3054 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3056 obj_reg = sp [0]->dreg;
3057 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3058 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3060 /* FIXME: generics */
3061 g_assert (klass->rank == 0);
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3065 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3071 MonoInst *element_class;
3073 /* This assertion is from the unboxcast insn */
3074 g_assert (klass->rank == 0);
3076 element_class = emit_get_rgctx_klass (cfg, context_used,
3077 klass->element_class, MONO_RGCTX_INFO_KLASS);
3079 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3080 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3082 save_cast_details (cfg, klass->element_class, obj_reg);
3083 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3084 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the unboxed value. */
3087 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3088 MONO_ADD_INS (cfg->cbb, add);
3089 add->type = STACK_MP;
3096 * Returns NULL and set the cfg exception on error.
/* Emit allocation of an instance of KLASS (FOR_BOX distinguishes box
 * allocations, which can use a specialized managed allocator). The strategy
 * depends on sharing/AOT mode: rgctx-provided klass/vtable under generic
 * sharing, mono_object_new under MONO_OPT_SHARED, a corlib-token helper for
 * out-of-line AOT corlib allocations, a GC managed allocator when available,
 * or the class's allocation function (optionally passed the instance size in
 * pointer words when pass_lw is set). */
3099 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3101 MonoInst *iargs [2];
3107 MonoInst *iargs [2];
3110 FIXME: we cannot get managed_alloc here because we can't get
3111 the class's vtable (because it's not a closed class)
3113 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3114 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3117 if (cfg->opt & MONO_OPT_SHARED)
3118 rgctx_info = MONO_RGCTX_INFO_KLASS;
3120 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3121 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3123 if (cfg->opt & MONO_OPT_SHARED) {
3124 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3126 alloc_ftn = mono_object_new;
3129 alloc_ftn = mono_object_new_specific;
3132 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3135 if (cfg->opt & MONO_OPT_SHARED) {
3136 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3137 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3139 alloc_ftn = mono_object_new;
3140 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3141 /* This happens often in argument checking code, eg. throw new FooException... */
3142 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3143 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3144 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3146 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3147 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
3151 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3152 cfg->exception_ptr = klass;
3156 #ifndef MONO_CROSS_COMPILE
3157 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3160 if (managed_alloc) {
3161 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3162 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3164 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3166 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to pointer granularity, in pointer words. */
3167 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3168 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3169 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3172 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3176 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3180 * Returns NULL and set the cfg exception on error.
/* Emit boxing of VAL into an instance of KLASS: Nullable<T> goes through the
 * class's Box method (via rgctx calli under sharing), everything else
 * allocates with handle_alloc () and stores the value after the object
 * header. */
3183 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3185 MonoInst *alloc, *ins;
3187 if (mono_class_is_nullable (klass)) {
3188 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3191 /* FIXME: What if the class is shared? We might not
3192 have to get the method address from the RGCTX. */
3193 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3194 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3195 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3197 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3199 return mono_emit_method_call (cfg, method, &val, NULL);
3203 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly-allocated box, past the object header. */
3207 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Predicate: does KLASS need the slow (icall) isinst/castclass path?
 * Currently forced TRUE for every class (the leading "TRUE ||") because the
 * fast path is disabled — see the FIXME below. */
3212 // FIXME: This doesn't work yet (class libs tests fail?)
3213 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3216 * Returns NULL and set the cfg exception on error.
/* Emit a castclass of SRC to KLASS. Under generic sharing the target class
 * is fetched from the rgctx; complex classes (see is_complex_isinst) are
 * checked through the mono_object_castclass icall, simpler ones through
 * inline null-check + vtable/class comparisons, throwing
 * InvalidCastException on mismatch. */
3219 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3221 MonoBasicBlock *is_null_bb;
3222 int obj_reg = src->dreg;
3223 int vtable_reg = alloc_preg (cfg);
3224 MonoInst *klass_inst = NULL;
3229 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3230 klass, MONO_RGCTX_INFO_KLASS);
3232 if (is_complex_isinst (klass)) {
3233 /* Complex case, handle by an icall */
3239 args [1] = klass_inst;
3241 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3243 /* Simple case, handled by the code below */
/* Null objects always pass a castclass: branch straight to the end. */
3247 NEW_BBLOCK (cfg, is_null_bb);
3249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3252 save_cast_details (cfg, klass, obj_reg);
3254 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3256 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3258 int klass_reg = alloc_preg (cfg);
3260 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3262 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3263 /* the remoting code is broken, access the class for now */
3264 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3265 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3267 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3268 cfg->exception_ptr = klass;
3271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3276 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3278 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3279 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3283 MONO_START_BB (cfg, is_null_bb);
3285 reset_cast_details (cfg);
3291 * Returns NULL and set the cfg exception on error.
/* Emit an isinst test of SRC against KLASS, producing the object on success
 * and NULL on failure. Complex classes (see is_complex_isinst) go through
 * the mono_object_isinst icall; the inline path dispatches on whether KLASS
 * is an interface, an array (with per-element-class subcases), a Nullable,
 * or a sealed/ordinary class, branching to is_null_bb (keep object) or
 * false_bb (produce NULL). */
3294 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3297 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3298 int obj_reg = src->dreg;
3299 int vtable_reg = alloc_preg (cfg);
3300 int res_reg = alloc_preg (cfg);
3301 MonoInst *klass_inst = NULL;
3304 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3306 if (is_complex_isinst (klass)) {
3309 /* Complex case, handle by an icall */
3315 args [1] = klass_inst;
3317 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3319 /* Simple case, the code below can handle it */
3323 NEW_BBLOCK (cfg, is_null_bb);
3324 NEW_BBLOCK (cfg, false_bb);
3325 NEW_BBLOCK (cfg, end_bb);
3327 /* Do the assignment at the beginning, so the other assignment can be if converted */
3328 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3329 ins->type = STACK_OBJ;
/* NULL input: isinst yields the (null) object unchanged. */
3332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3337 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3338 g_assert (!context_used);
3339 /* the is_null_bb target simply copies the input register to the output */
3340 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3342 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks, then dispatch on the array's cast_class. */
3345 int rank_reg = alloc_preg (cfg);
3346 int eclass_reg = alloc_preg (cfg);
3348 g_assert (!context_used);
3349 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3354 if (klass->cast_class == mono_defaults.object_class) {
3355 int parent_reg = alloc_preg (cfg);
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3357 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3358 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3360 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3361 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3362 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3363 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3364 } else if (klass->cast_class == mono_defaults.enum_class) {
3365 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3367 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3368 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3370 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3371 /* Check that the object is a vector too */
3372 int bounds_reg = alloc_preg (cfg);
3373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3378 /* the is_null_bb target simply copies the input register to the output */
3379 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3381 } else if (mono_class_is_nullable (klass)) {
3382 g_assert (!context_used);
3383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3384 /* the is_null_bb target simply copies the input register to the output */
3385 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3387 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3388 g_assert (!context_used);
3389 /* the remoting code is broken, access the class for now */
3390 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3391 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3393 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3394 cfg->exception_ptr = klass;
3397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3405 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3406 /* the is_null_bb target simply copies the input register to the output */
3407 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result register is set to NULL. */
3412 MONO_START_BB (cfg, false_bb);
3414 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3415 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3417 MONO_START_BB (cfg, is_null_bb);
3419 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the "conditional isinst" opcode used by remoting-aware code; the
 * result codes are documented in the comment below. Transparent proxies
 * whose type cannot be determined locally yield 2 so the caller can fall
 * back to a remote check.
 */
3425 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3427 /* This opcode takes as input an object reference and a class, and returns:
3428 0) if the object is an instance of the class,
3429 1) if the object is not instance of the class,
3430 2) if the object is a proxy whose type cannot be determined */
3433 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3434 int obj_reg = src->dreg;
3435 int dreg = alloc_ireg (cfg);
3437 int klass_reg = alloc_preg (cfg);
3439 NEW_BBLOCK (cfg, true_bb);
3440 NEW_BBLOCK (cfg, false_bb);
3441 NEW_BBLOCK (cfg, false2_bb);
3442 NEW_BBLOCK (cfg, end_bb);
3443 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is never an instance: result 1. */
3445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3448 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3449 NEW_BBLOCK (cfg, interface_fail_bb);
3451 tmp_reg = alloc_preg (cfg);
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3453 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3454 MONO_START_BB (cfg, interface_fail_bb);
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still answer 2. */
3457 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3459 tmp_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3464 tmp_reg = alloc_preg (cfg);
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface path: for a proxy, test against its remote proxy_class. */
3468 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3469 tmp_reg = alloc_preg (cfg);
3470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3473 tmp_reg = alloc_preg (cfg);
3474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3478 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3479 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3481 MONO_START_BB (cfg, no_proxy_bb);
3483 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three result codes and fall through to end_bb. */
3486 MONO_START_BB (cfg, false_bb);
3488 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3491 MONO_START_BB (cfg, false2_bb);
3493 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3496 MONO_START_BB (cfg, true_bb);
3498 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3500 MONO_START_BB (cfg, end_bb);
/* Result is surfaced as an OP_ICONST-typed instruction with stack type I4. */
3503 MONO_INST_NEW (cfg, ins, OP_ICONST);
3505 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a remoting-aware castclass.  Produces an integer in DREG:
 * 0 when the cast is known to succeed, 1 when the object is a transparent
 * proxy whose real type cannot be determined at this point; otherwise an
 * InvalidCastException is raised inline.
 * NOTE(review): the chunk is missing some original lines (e.g. the final
 * return) — do not assume the visible lines are the complete body.
 */
3511 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3513 /* This opcode takes as input an object reference and a class, and returns:
3514 0) if the object is an instance of the class,
3515 1) if the object is a proxy whose type cannot be determined
3516 an InvalidCastException exception is thrown otherwise*/
3519 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3520 int obj_reg = src->dreg;
3521 int dreg = alloc_ireg (cfg);
3522 int tmp_reg = alloc_preg (cfg);
3523 int klass_reg = alloc_preg (cfg);
3525 NEW_BBLOCK (cfg, end_bb);
3526 NEW_BBLOCK (cfg, ok_result_bb);
/* A NULL reference trivially passes the cast. */
3528 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a better exception message on failure. */
3531 save_cast_details (cfg, klass, obj_reg);
3533 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3534 NEW_BBLOCK (cfg, interface_fail_bb);
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3537 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info may still pass (result 1); anything else throws. */
3538 MONO_START_BB (cfg, interface_fail_bb);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3541 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3543 tmp_reg = alloc_preg (cfg);
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3546 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3548 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: dispatch on whether the object is a proxy. */
3552 NEW_BBLOCK (cfg, no_proxy_bb);
3554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3556 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the remote class's proxy_class instead. */
3558 tmp_reg = alloc_preg (cfg);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* A proxy without custom type info is treated like a plain object. */
3562 tmp_reg = alloc_preg (cfg);
3563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3567 NEW_BBLOCK (cfg, fail_1_bb);
3569 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy whose type could not be verified here: result 1. */
3571 MONO_START_BB (cfg, fail_1_bb);
3573 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a normal castclass (throws on mismatch). */
3576 MONO_START_BB (cfg, no_proxy_bb);
3578 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3581 MONO_START_BB (cfg, ok_result_bb);
3583 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3585 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack. */
3588 MONO_INST_NEW (cfg, ins, OP_ICONST);
3590 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR equivalent to mono_delegate_ctor (): allocate the delegate
 * object and initialize its target, method, method_code and invoke_impl
 * fields inline.
 * Returns NULL and sets the cfg exception on error.
 */
3596 * Returns NULL and set the cfg exception on error.
3598 static G_GNUC_UNUSED MonoInst*
3599 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3601 gpointer *trampoline;
3602 MonoInst *obj, *method_ins, *tramp_ins;
3606 obj = handle_alloc (cfg, klass, FALSE, 0);
3610 /* Inline the contents of mono_delegate_ctor */
3612 /* Set target field */
3613 /* Optimize away setting of NULL target */
3614 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3617 /* Set method field */
3618 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3619 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
/*
3622 * To avoid looking up the compiled code belonging to the target method
3623 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3624 * store it, and we fill it after the method has been compiled.
 */
3626 if (!cfg->compile_aot && !method->dynamic) {
3627 MonoInst *code_slot_ins;
3630 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash; reuse an
 * existing slot so all delegates to the same method share it. */
3632 domain = mono_domain_get ();
3633 mono_domain_lock (domain);
3634 if (!domain_jit_info (domain)->method_code_hash)
3635 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3636 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3638 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3639 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3641 mono_domain_unlock (domain);
3643 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3648 /* Set invoke_impl field */
3649 if (cfg->compile_aot) {
3650 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3652 trampoline = mono_create_delegate_trampoline (klass);
3653 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3657 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a vararg native call to the rank-specific mono_array_new_va icall
 * for a multi-dimensional NEWARR/NEWOBJ.  SP holds the dimension arguments.
 */
3663 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3665 MonoJitICallInfo *info;
3667 /* Need to register the icall so it gets an icall wrapper */
3668 info = mono_get_array_new_va_icall (rank);
3670 cfg->flags |= MONO_CFG_HAS_VARARGS;
3672 /* mono_array_new_va () needs a vararg calling convention */
3673 cfg->disable_llvm = TRUE;
3675 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3676 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the
 * entry basic block (once per method), and keep the variable live for the
 * whole method via a dummy use in the exit block.
 */
3680 mono_emit_load_got_addr (MonoCompile *cfg)
3682 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already set up. */
3684 if (!cfg->got_var || cfg->got_var_allocated)
3687 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3688 getaddr->dreg = cfg->got_var->dreg;
3690 /* Add it to the start of the first bblock */
3691 if (cfg->bb_entry->code) {
3692 getaddr->next = cfg->bb_entry->code;
3693 cfg->bb_entry->code = getaddr;
3696 MONO_ADD_INS (cfg->bb_entry, getaddr);
3698 cfg->got_var_allocated = TRUE;
/*
3701 * Add a dummy use to keep the got_var alive, since real uses might
3702 * only be generated by the back ends.
3703 * Add it to end_bblock, so the variable's lifetime covers the whole
 * method.
3705 * It would be better to make the usage of the got var explicit in all
3706 * cases when the backend needs it (i.e. calls, throw etc.), so this
3707 * wouldn't be needed.
 */
3709 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3710 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes); configurable via MONO_INLINELIMIT. */
3713 static int inline_limit;
3714 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Checks method attributes, IL size against the
 * (env-configurable) limit, class-initialization constraints, CAS
 * declarative security, and (under soft-float) R4 signatures.
 */
3717 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3719 MonoMethodHeaderSummary header;
3721 #ifdef MONO_ARCH_SOFT_FLOAT
3722 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled entirely under generic sharing. */
3726 if (cfg->generic_sharing_context)
/* Cap recursion depth of nested inlines. */
3729 if (cfg->inline_depth > 10)
3732 #ifdef MONO_ARCH_HAVE_LMF_OPS
3733 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3734 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3735 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3740 if (!mono_method_get_header_summary (method, &header))
3743 /*runtime, icall and pinvoke are checked by summary call*/
3744 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3745 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3746 (method->klass->marshalbyref) ||
3750 /* also consider num_locals? */
3751 /* Do the size check early to avoid creating vtables */
/* NOTE(review): getenv() is called twice here; caching the result in a
 * local would avoid the second lookup — confirm before changing. */
3752 if (!inline_limit_inited) {
3753 if (getenv ("MONO_INLINELIMIT"))
3754 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3756 inline_limit = INLINE_LENGTH_LIMIT;
3757 inline_limit_inited = TRUE;
3759 if (header.code_size >= inline_limit)
/*
3763 * if we can initialize the class of the method right away, we do,
3764 * otherwise we don't allow inlining if the class needs initialization,
3765 * since it would mean inserting a call to mono_runtime_class_init()
3766 * inside the inlined code
 */
3768 if (!(cfg->opt & MONO_OPT_SHARED)) {
3769 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3770 if (cfg->run_cctors && method->klass->has_cctor) {
3771 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3772 if (!method->klass->runtime_info)
3773 /* No vtable created yet */
3775 vtable = mono_class_vtable (cfg->domain, method->klass);
3778 /* This makes so that inline cannot trigger */
3779 /* .cctors: too many apps depend on them */
3780 /* running with a specific order... */
3781 if (! vtable->initialized)
3783 mono_runtime_class_init (vtable);
3785 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3786 if (!method->klass->runtime_info)
3787 /* No vtable created yet */
3789 vtable = mono_class_vtable (cfg->domain, method->klass);
3792 if (!vtable->initialized)
/*
3797 * If we're compiling for shared code
3798 * the cctor will need to be run at aot method load time, for example,
3799 * or at the end of the compilation of the inlining method.
 */
3801 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/*
3806 * CAS - do not inline methods with declarative security
3807 * Note: this has to be before any possible return TRUE;
 */
3809 if (mono_method_has_declsec (method))
3812 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need helper calls, so don't inline R4 signatures. */
3814 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3816 for (i = 0; i < sig->param_count; ++i)
3817 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class.
 */
3825 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized at JIT time (AOT can't rely on that). */
3827 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes don't need a check at field access. */
3830 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3833 if (!mono_class_needs_cctor_run (vtable->klass, method))
3836 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3837 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element class KLASS, with an optional bounds check.
 * Returns the address instruction (STACK_PTR).
 */
3844 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3848 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3850 mono_class_init (klass);
3851 size = mono_class_array_element_size (klass);
3853 mult_reg = alloc_preg (cfg);
3854 array_reg = arr->dreg;
3855 index_reg = index->dreg;
3857 #if SIZEOF_REGISTER == 8
3858 /* The array reg is 64 bits but the index reg is only 32 */
3859 if (COMPILE_LLVM (cfg)) {
3861 index2_reg = index_reg;
3863 index2_reg = alloc_preg (cfg);
3864 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3867 if (index->type == STACK_I8) {
3868 index2_reg = alloc_preg (cfg);
3869 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3871 index2_reg = index_reg;
3876 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3878 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold the scale into a single LEA. */
3879 if (size == 1 || size == 2 || size == 4 || size == 8) {
3880 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3882 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3883 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (vector). */
3889 add_reg = alloc_preg (cfg);
3891 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3892 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3893 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3894 ins->type = STACK_PTR;
3895 MONO_ADD_INS (cfg->cbb, ins);
3900 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array ARR with element class KLASS, with range checks against the
 * per-dimension bounds.  Only available when the arch has a real OP_LMUL.
 */
3902 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3904 int bounds_reg = alloc_preg (cfg);
3905 int add_reg = alloc_preg (cfg);
3906 int mult_reg = alloc_preg (cfg);
3907 int mult2_reg = alloc_preg (cfg);
3908 int low1_reg = alloc_preg (cfg);
3909 int low2_reg = alloc_preg (cfg);
3910 int high1_reg = alloc_preg (cfg);
3911 int high2_reg = alloc_preg (cfg);
3912 int realidx1_reg = alloc_preg (cfg);
3913 int realidx2_reg = alloc_preg (cfg);
3914 int sum_reg = alloc_preg (cfg);
3919 mono_class_init (klass);
3920 size = mono_class_array_element_size (klass);
3922 index1 = index_ins1->dreg;
3923 index2 = index_ins2->dreg;
3925 /* range checking */
3926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3927 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, unsigned-compare vs length. */
3929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3930 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3931 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3933 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3934 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3935 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds entry follows the first. */
3937 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3938 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3939 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3941 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3943 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
3945 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3946 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3948 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3949 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3951 ins->type = STACK_MP;
3953 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Address/Get/Set
 * intrinsic call CMETHOD.  Rank-1 (and rank-2 when the arch supports it)
 * are expanded inline; higher ranks call a marshalling helper.
 */
3960 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3964 MonoMethod *addr_method;
/* For a setter the trailing value argument is not an index. */
3967 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3970 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3972 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3973 /* emit_ldelema_2 depends on OP_LMUL */
3974 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3975 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address helper for this rank/size. */
3979 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3980 addr_method = mono_marshal_get_array_address (rank, element_size);
3981 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
3986 static MonoBreakPolicy
3987 always_insert_breakpoint (MonoMethod *method)
3989 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
3992 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
/*
3995 * mono_set_break_policy:
3996 * policy_callback: the new callback function
 *
3998 * Allow embedders to decide whether to actually obey breakpoint instructions
3999 * (both break IL instructions and Debugger.Break () method calls), for example
4000 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4001 * untrusted or semi-trusted code.
 *
4003 * @policy_callback will be called every time a break point instruction needs to
4004 * be inserted with the method argument being the method that calls Debugger.Break()
4005 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4006 * if it wants the breakpoint to not be effective in the given method.
4007 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
4010 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4012 if (policy_callback)
4013 break_policy_func = policy_callback;
4015 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint in METHOD should be emitted as a real OP_BREAK.
 * NOTE(review): identifier misspells "breakpoint"; renaming would require
 * updating its call sites too.
 */
4019 should_insert_brekpoint (MonoMethod *method) {
4020 switch (break_policy_func (method)) {
4021 case MONO_BREAK_POLICY_ALWAYS:
4023 case MONO_BREAK_POLICY_NEVER:
4025 case MONO_BREAK_POLICY_ON_DBG:
/* Only break when running under the Mono debugger. */
4026 return mono_debug_using_mono_debugger ();
4028 g_warning ("Incorrect value returned from break policy callback");
4033 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = address of the value.
 * IS_SET selects store-into-array vs load-from-array.
 */
4035 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4037 MonoInst *addr, *store, *load;
4038 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4040 /* the bounds check is already done by the callers */
4041 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: *element = *value. */
4043 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4044 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* GetGenericValueImpl: *value = *element. */
4046 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4047 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic IR sequence.
 * Currently only SIMD constructors are handled (when MONO_OPT_SIMD is on).
 */
4053 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4055 MonoInst *ins = NULL;
4056 #ifdef MONO_ARCH_SIMD_INTRINSICS
4057 if (cfg->opt & MONO_OPT_SIMD) {
4058 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with an intrinsic IR sequence.
 * Dispatches on the declaring class: String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger/Environment,
 * Math/SIMD, then falls back to the arch-specific hook.
 * Returns the emitted instruction, or NULL when no intrinsic applies.
 */
4068 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4070 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4072 static MonoClass *runtime_helpers_class = NULL;
4073 if (! runtime_helpers_class)
4074 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4075 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4077 if (cmethod->klass == mono_defaults.string_class) {
4078 if (strcmp (cmethod->name, "get_Chars") == 0) {
4079 int dreg = alloc_ireg (cfg);
4080 int index_reg = alloc_preg (cfg);
4081 int mult_reg = alloc_preg (cfg);
4082 int add_reg = alloc_preg (cfg);
4084 #if SIZEOF_REGISTER == 8
4085 /* The array reg is 64 bits but the index reg is only 32 */
4086 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4088 index_reg = args [1]->dreg;
4090 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4092 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4093 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4094 add_reg = ins->dreg;
4095 /* Avoid a warning */
4097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4100 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4101 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4102 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4103 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4105 type_from_op (ins, NULL, NULL);
4107 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4108 int dreg = alloc_ireg (cfg);
4109 /* Decompose later to allow more optimizations */
4110 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4111 ins->type = STACK_I4;
4112 cfg->cbb->has_array_access = TRUE;
4113 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4116 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4117 int mult_reg = alloc_preg (cfg);
4118 int add_reg = alloc_preg (cfg);
4120 /* The corlib functions check for oob already. */
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4122 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4123 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4124 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4127 } else if (cmethod->klass == mono_defaults.object_class) {
4129 if (strcmp (cmethod->name, "GetType") == 0) {
4130 int dreg = alloc_preg (cfg);
4131 int vt_reg = alloc_preg (cfg);
4132 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4133 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4134 type_from_op (ins, NULL, NULL);
4137 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the (stable) object address; invalid with a moving GC. */
4138 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4139 int dreg = alloc_ireg (cfg);
4140 int t1 = alloc_ireg (cfg);
4142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4143 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4144 ins->type = STACK_I4;
4148 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4149 MONO_INST_NEW (cfg, ins, OP_NOP);
4150 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4154 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4155 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4156 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4157 if (cmethod->name [0] != 'g')
4160 if (strcmp (cmethod->name, "get_Rank") == 0) {
4161 int dreg = alloc_ireg (cfg);
4162 int vtable_reg = alloc_preg (cfg);
4163 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4164 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4165 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4166 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4167 type_from_op (ins, NULL, NULL);
4170 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4171 int dreg = alloc_ireg (cfg);
4173 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4174 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4175 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4180 } else if (cmethod->klass == runtime_helpers_class) {
4182 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4183 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4187 } else if (cmethod->klass == mono_defaults.thread_class) {
4188 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4189 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4190 MONO_ADD_INS (cfg->cbb, ins);
4192 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4193 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4194 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4197 } else if (cmethod->klass == mono_defaults.monitor_class) {
4198 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4199 if (strcmp (cmethod->name, "Enter") == 0) {
4202 if (COMPILE_LLVM (cfg)) {
/*
4204 * Pass the argument normally, the LLVM backend will handle the
4205 * calling convention problems.
 */
4207 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4209 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4210 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4211 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4212 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4215 return (MonoInst*)call;
4216 } else if (strcmp (cmethod->name, "Exit") == 0) {
4219 if (COMPILE_LLVM (cfg)) {
4220 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4222 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4223 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4224 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4225 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4228 return (MonoInst*)call;
4230 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4231 MonoMethod *fast_method = NULL;
4233 /* Avoid infinite recursion */
4234 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4235 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4236 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4239 if (strcmp (cmethod->name, "Enter") == 0 ||
4240 strcmp (cmethod->name, "Exit") == 0)
4241 fast_method = mono_monitor_get_fast_path (cmethod);
4245 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4247 } else if (cmethod->klass->image == mono_defaults.corlib &&
4248 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4249 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4252 #if SIZEOF_REGISTER == 8
4253 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4254 /* 64 bit reads are already atomic */
4255 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4256 ins->dreg = mono_alloc_preg (cfg);
4257 ins->inst_basereg = args [0]->dreg;
4258 ins->inst_offset = 0;
4259 MONO_ADD_INS (cfg->cbb, ins);
4263 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add become atomic-add with the right constant. */
4264 if (strcmp (cmethod->name, "Increment") == 0) {
4265 MonoInst *ins_iconst;
4268 if (fsig->params [0]->type == MONO_TYPE_I4)
4269 opcode = OP_ATOMIC_ADD_NEW_I4;
4270 #if SIZEOF_REGISTER == 8
4271 else if (fsig->params [0]->type == MONO_TYPE_I8)
4272 opcode = OP_ATOMIC_ADD_NEW_I8;
4275 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4276 ins_iconst->inst_c0 = 1;
4277 ins_iconst->dreg = mono_alloc_ireg (cfg);
4278 MONO_ADD_INS (cfg->cbb, ins_iconst);
4280 MONO_INST_NEW (cfg, ins, opcode);
4281 ins->dreg = mono_alloc_ireg (cfg);
4282 ins->inst_basereg = args [0]->dreg;
4283 ins->inst_offset = 0;
4284 ins->sreg2 = ins_iconst->dreg;
4285 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4286 MONO_ADD_INS (cfg->cbb, ins);
4288 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4289 MonoInst *ins_iconst;
4292 if (fsig->params [0]->type == MONO_TYPE_I4)
4293 opcode = OP_ATOMIC_ADD_NEW_I4;
4294 #if SIZEOF_REGISTER == 8
4295 else if (fsig->params [0]->type == MONO_TYPE_I8)
4296 opcode = OP_ATOMIC_ADD_NEW_I8;
4299 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4300 ins_iconst->inst_c0 = -1;
4301 ins_iconst->dreg = mono_alloc_ireg (cfg);
4302 MONO_ADD_INS (cfg->cbb, ins_iconst);
4304 MONO_INST_NEW (cfg, ins, opcode);
4305 ins->dreg = mono_alloc_ireg (cfg);
4306 ins->inst_basereg = args [0]->dreg;
4307 ins->inst_offset = 0;
4308 ins->sreg2 = ins_iconst->dreg;
4309 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4310 MONO_ADD_INS (cfg->cbb, ins);
4312 } else if (strcmp (cmethod->name, "Add") == 0) {
4315 if (fsig->params [0]->type == MONO_TYPE_I4)
4316 opcode = OP_ATOMIC_ADD_NEW_I4;
4317 #if SIZEOF_REGISTER == 8
4318 else if (fsig->params [0]->type == MONO_TYPE_I8)
4319 opcode = OP_ATOMIC_ADD_NEW_I8;
4323 MONO_INST_NEW (cfg, ins, opcode);
4324 ins->dreg = mono_alloc_ireg (cfg);
4325 ins->inst_basereg = args [0]->dreg;
4326 ins->inst_offset = 0;
4327 ins->sreg2 = args [1]->dreg;
4328 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4329 MONO_ADD_INS (cfg->cbb, ins);
4332 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4334 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4335 if (strcmp (cmethod->name, "Exchange") == 0) {
4337 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4339 if (fsig->params [0]->type == MONO_TYPE_I4)
4340 opcode = OP_ATOMIC_EXCHANGE_I4;
4341 #if SIZEOF_REGISTER == 8
4342 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4343 (fsig->params [0]->type == MONO_TYPE_I))
4344 opcode = OP_ATOMIC_EXCHANGE_I8;
4346 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4347 opcode = OP_ATOMIC_EXCHANGE_I4;
4352 MONO_INST_NEW (cfg, ins, opcode);
4353 ins->dreg = mono_alloc_ireg (cfg);
4354 ins->inst_basereg = args [0]->dreg;
4355 ins->inst_offset = 0;
4356 ins->sreg2 = args [1]->dreg;
4357 MONO_ADD_INS (cfg->cbb, ins);
4359 switch (fsig->params [0]->type) {
4361 ins->type = STACK_I4;
4365 ins->type = STACK_I8;
4367 case MONO_TYPE_OBJECT:
4368 ins->type = STACK_OBJ;
4371 g_assert_not_reached ();
4374 #if HAVE_WRITE_BARRIERS
/* Reference stores must notify the GC via the write barrier. */
4376 MonoInst *dummy_use;
4377 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4378 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4379 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4383 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4385 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4386 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4388 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4389 if (fsig->params [1]->type == MONO_TYPE_I4)
4391 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4392 size = sizeof (gpointer);
4393 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4396 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4397 ins->dreg = alloc_ireg (cfg);
4398 ins->sreg1 = args [0]->dreg;
4399 ins->sreg2 = args [1]->dreg;
4400 ins->sreg3 = args [2]->dreg;
4401 ins->type = STACK_I4;
4402 MONO_ADD_INS (cfg->cbb, ins);
4403 } else if (size == 8) {
4404 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4405 ins->dreg = alloc_ireg (cfg);
4406 ins->sreg1 = args [0]->dreg;
4407 ins->sreg2 = args [1]->dreg;
4408 ins->sreg3 = args [2]->dreg;
4409 ins->type = STACK_I8;
4410 MONO_ADD_INS (cfg->cbb, ins);
4412 /* g_assert_not_reached (); */
4414 #if HAVE_WRITE_BARRIERS
4416 MonoInst *dummy_use;
4417 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4418 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4419 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4423 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Debugger.Break / Environment.get_IsRunningOnWindows ---- */
4427 } else if (cmethod->klass->image == mono_defaults.corlib) {
4428 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4429 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4430 if (should_insert_brekpoint (cfg->method))
4431 MONO_INST_NEW (cfg, ins, OP_BREAK);
4433 MONO_INST_NEW (cfg, ins, OP_NOP);
4434 MONO_ADD_INS (cfg->cbb, ins);
4437 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4438 && strcmp (cmethod->klass->name, "Environment") == 0) {
4440 EMIT_NEW_ICONST (cfg, ins, 1);
4442 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math (SIMD only; Min/Max NaN semantics prevent more) ---- */
4446 } else if (cmethod->klass == mono_defaults.math_class) {
/*
4448 * There is general branches code for Min/Max, but it does not work for
 * floats because of NaN comparisons; see
4450 * http://everything2.com/?node_id=1051618
 */
4454 #ifdef MONO_ARCH_SIMD_INTRINSICS
4455 if (cfg->opt & MONO_OPT_SIMD) {
4456 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally give the architecture backend a chance. */
4462 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to faster equivalents;
 * currently only String.InternalAllocateStr to the GC's managed
 * allocator.  Returns NULL when no redirection applies.
4466 * This entry point could be used later for arbitrary method
 * redirection.
 */
4469 inline static MonoInst*
4470 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4471 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4473 if (method->klass == mono_defaults.string_class) {
4474 /* managed string allocation support */
/* Skip the fast path when allocation profiling is on, so the
 * profiler still sees the allocation. */
4475 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4476 MonoInst *iargs [2];
4477 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4478 MonoMethod *managed_alloc = NULL;
4480 g_assert (vtable); /*Should not fail since it is System.String*/
4481 #ifndef MONO_CROSS_COMPILE
4482 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4486 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4487 iargs [1] = args [0];
4488 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for every argument of the
 * inlined method (including an implicit this) and emit stores of the
 * caller's stack values SP into them.
 */
4495 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4497 MonoInst *store, *temp;
4500 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4501 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
/*
4504 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4505 * would be different than the MonoInst's used to represent arguments, and
4506 * the ldelema implementation can't deal with that.
4507 * Solution: When ldelema is used on an inline argument, create a var for
4508 * it, emit ldelema on that var, and emit the saving code below in
4509 * inline_method () if needed.
 */
4511 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4512 cfg->args [i] = temp;
4513 /* This uses cfg->args [i] which is set by the preceding line */
4514 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4515 store->cil_code = sp [0]->cil_code;
/* Debug switches: restrict inlining by callee/caller name prefix. */
4520 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4521 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4523 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD's full name starts with the prefix in
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable (always
 * true when the variable is unset/empty).  The env lookup is cached.
 */
4525 check_inline_called_method_name_limit (MonoMethod *called_method)
4528 static char *limit = NULL;
4530 if (limit == NULL) {
4531 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4533 if (limit_string != NULL)
4534 limit = limit_string;
4536 limit = (char *) "";
4539 if (limit [0] != '\0') {
4540 char *called_method_name = mono_method_full_name (called_method, TRUE);
4542 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4543 g_free (called_method_name);
4545 //return (strncmp_result <= 0);
4546 return (strncmp_result == 0);
4553 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit ():
 * only permit inlining when the *caller's* full name starts with the
 * prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  The getenv () result is
 * cached in a static on first use.
 */
4555 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4558 static char *limit = NULL;
4560 if (limit == NULL) {
4561 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4562 if (limit_string != NULL) {
4563 limit = limit_string;
/* No limit configured: cache the empty string so every caller passes. */
4565 limit = (char *) "";
4569 if (limit [0] != '\0') {
4570 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4572 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4573 g_free (caller_method_name);
4575 //return (strncmp_result <= 0);
/* TRUE only when the caller's full name begins with the configured prefix. */
4576 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Attempt to inline the body of CMETHOD at the current call site by
 * recursively running mono_method_to_ir () between a fresh start bblock
 * (sbblock) and end bblock (ebblock).  The per-method fields of CFG are
 * saved into prev_* locals, overwritten for the inlinee, and restored
 * afterwards regardless of the outcome.  On success the new bblocks are
 * linked into the caller's graph and merged where possible; on failure
 * cfg->cbb is reset so the speculative bblocks are discarded.
 */
4584 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4585 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4587 MonoInst *ins, *rvar = NULL;
4588 MonoMethodHeader *cheader;
4589 MonoBasicBlock *ebblock, *sbblock;
/* Saved copies of the cfg fields that the recursive mono_method_to_ir () overwrites. */
4591 MonoMethod *prev_inlined_method;
4592 MonoInst **prev_locals, **prev_args;
4593 MonoType **prev_arg_types;
4594 guint prev_real_offset;
4595 GHashTable *prev_cbb_hash;
4596 MonoBasicBlock **prev_cil_offset_to_bb;
4597 MonoBasicBlock *prev_cbb;
4598 unsigned char* prev_cil_start;
4599 guint32 prev_cil_offset_to_bb_len;
4600 MonoMethod *prev_current_method;
4601 MonoGenericContext *prev_generic_context;
4602 gboolean ret_var_set, prev_ret_var_set;
4604 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters (debugging aids); skipped when inlining is forced. */
4606 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4607 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4610 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4611 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4615 if (cfg->verbose_level > 2)
4616 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once. */
4618 if (!cmethod->inline_info) {
4619 mono_jit_stats.inlineable_methods++;
4620 cmethod->inline_info = 1;
4623 /* allocate local variables */
4624 cheader = mono_method_get_header (cmethod);
4626 if (cheader == NULL || mono_loader_get_last_error ()) {
4628 mono_metadata_free_mh (cheader);
4629 mono_loader_clear_error ();
4633 /* allocate space to store the return value */
4634 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4635 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* The inlinee's locals become fresh OP_LOCAL vars in the caller's cfg. */
4639 prev_locals = cfg->locals;
4640 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4641 for (i = 0; i < cheader->num_locals; ++i)
4642 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4644 /* allocate start and end blocks */
4645 /* This is needed so if the inline is aborted, we can clean up */
4646 NEW_BBLOCK (cfg, sbblock);
4647 sbblock->real_offset = real_offset;
4649 NEW_BBLOCK (cfg, ebblock);
4650 ebblock->block_num = cfg->num_bblocks++;
4651 ebblock->real_offset = real_offset;
/* Save the caller's IR state before handing cfg to the inlinee. */
4653 prev_args = cfg->args;
4654 prev_arg_types = cfg->arg_types;
4655 prev_inlined_method = cfg->inlined_method;
4656 cfg->inlined_method = cmethod;
4657 cfg->ret_var_set = FALSE;
4658 cfg->inline_depth ++;
4659 prev_real_offset = cfg->real_offset;
4660 prev_cbb_hash = cfg->cbb_hash;
4661 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4662 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4663 prev_cil_start = cfg->cil_start;
4664 prev_cbb = cfg->cbb;
4665 prev_current_method = cfg->current_method;
4666 prev_generic_context = cfg->generic_context;
4667 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the inlinee's IL between sbblock and ebblock. */
4669 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4671 ret_var_set = cfg->ret_var_set;
/* Restore the caller's IR state. */
4673 cfg->inlined_method = prev_inlined_method;
4674 cfg->real_offset = prev_real_offset;
4675 cfg->cbb_hash = prev_cbb_hash;
4676 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4677 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4678 cfg->cil_start = prev_cil_start;
4679 cfg->locals = prev_locals;
4680 cfg->args = prev_args;
4681 cfg->arg_types = prev_arg_types;
4682 cfg->current_method = prev_current_method;
4683 cfg->generic_context = prev_generic_context;
4684 cfg->ret_var_set = prev_ret_var_set;
4685 cfg->inline_depth --;
/* Accept the inline when translation succeeded cheaply enough, or when forced. */
4687 if ((costs >= 0 && costs < 60) || inline_allways) {
4688 if (cfg->verbose_level > 2)
4689 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4691 mono_jit_stats.inlined_methods++;
4693 /* always add some code to avoid block split failures */
4694 MONO_INST_NEW (cfg, ins, OP_NOP);
4695 MONO_ADD_INS (prev_cbb, ins);
4697 prev_cbb->next_bb = sbblock;
4698 link_bblock (cfg, prev_cbb, sbblock);
4701 * Get rid of the begin and end bblocks if possible to aid local
4704 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4706 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4707 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4709 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4710 MonoBasicBlock *prev = ebblock->in_bb [0];
4711 mono_merge_basic_blocks (cfg, prev, ebblock);
4713 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4714 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4715 cfg->cbb = prev_cbb;
4723 * If the inlined method contains only a throw, then the ret var is not
4724 * set, so set it to a dummy value.
4727 static double r8_0 = 0.0;
4729 switch (rvar->type) {
4731 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4734 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4739 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4742 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4743 ins->type = STACK_R8;
4744 ins->inst_p0 = (void*)&r8_0;
4745 ins->dreg = rvar->dreg;
4746 MONO_ADD_INS (cfg->cbb, ins);
4749 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4752 g_assert_not_reached ();
/* Push the (possibly dummy) return value back onto the caller's stack. */
4756 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header ownership passes to the cfg; freed with the other headers. */
4759 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any pending exception/loader error and drop the new bblocks. */
4762 if (cfg->verbose_level > 2)
4763 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4764 cfg->exception_type = MONO_EXCEPTION_NONE;
4765 mono_loader_clear_error ();
4767 /* This gets rid of the newly added bblocks */
4768 cfg->cbb = prev_cbb;
4770 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4775 * Some of these comments may well be out-of-date.
4776 * Design decisions: we do a single pass over the IL code (and we do bblock
4777 * splitting/merging in the few cases when it's required: a back jump to an IL
4778 * address that was not already seen as bblock starting point).
4779 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4780 * Complex operations are decomposed in simpler ones right away. We need to let the
4781 * arch-specific code peek and poke inside this process somehow (except when the
4782 * optimizations can take advantage of the full semantic info of coarse opcodes).
4783 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4784 * MonoInst->opcode initially is the IL opcode or some simplification of that
4785 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4786 * opcode with value bigger than OP_LAST.
4787 * At this point the IR can be handed over to an interpreter, a dumb code generator
4788 * or to the optimizing code generator that will translate it to SSA form.
4790 * Profiling directed optimizations.
4791 * We may compile by default with few or no optimizations and instrument the code
4792 * or the user may indicate what methods to optimize the most either in a config file
4793 * or through repeated runs where the compiler applies offline the optimizations to
4794 * each method and then decides if it was worth it.
4797 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4798 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4799 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4800 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4801 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4802 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4803 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4804 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4806 /* offset from br.s -> br like opcodes */
4807 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the CIL address IP falls inside basic block BB: either
 * no bblock starts at IP (so BB continues through it) or the bblock that
 * starts there is BB itself.
 */
4810 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4812 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4814 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream between START and END and create (via GET_BBLOCK)
 * a basic block at every branch target and after every branch/switch.
 * Blocks containing a CEE_THROW are marked out_of_line so they can be
 * treated as cold code.
 */
4818 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4820 unsigned char *ip = start;
4821 unsigned char *target;
4824 MonoBasicBlock *bblock;
4825 const MonoOpcode *opcode;
4828 cli_addr = ip - start;
4829 i = mono_opcode_value ((const guint8 **)&ip, end);
4832 opcode = &mono_opcodes [i];
/* Advance over the operand, recording branch targets, keyed on operand kind. */
4833 switch (opcode->argument) {
4834 case MonoInlineNone:
4837 case MonoInlineString:
4838 case MonoInlineType:
4839 case MonoInlineField:
4840 case MonoInlineMethod:
4843 case MonoShortInlineR:
4850 case MonoShortInlineVar:
4851 case MonoShortInlineI:
4854 case MonoShortInlineBrTarget:
/* 2 = opcode byte + 8-bit displacement; target is relative to the next instruction. */
4855 target = start + cli_addr + 2 + (signed char)ip [1];
4856 GET_BBLOCK (cfg, bblock, target);
/* The fall-through instruction after the branch also starts a bblock. */
4859 GET_BBLOCK (cfg, bblock, ip);
4861 case MonoInlineBrTarget:
/* 5 = opcode byte + 32-bit displacement. */
4862 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4863 GET_BBLOCK (cfg, bblock, target);
4866 GET_BBLOCK (cfg, bblock, ip);
4868 case MonoInlineSwitch: {
4869 guint32 n = read32 (ip + 1);
/* Fall-through address: past the opcode, the count and the n 32-bit targets. */
4872 cli_addr += 5 + 4 * n;
4873 target = start + cli_addr;
4874 GET_BBLOCK (cfg, bblock, target);
/* Each switch entry is an offset relative to the end of the instruction. */
4876 for (j = 0; j < n; ++j) {
4877 target = start + cli_addr + (gint32)read32 (ip);
4878 GET_BBLOCK (cfg, bblock, target);
4888 g_assert_not_reached ();
4891 if (i == CEE_THROW) {
4892 unsigned char *bb_start = ip - 1;
4894 /* Find the start of the bblock containing the throw */
4896 while ((bb_start >= start) && !bblock) {
4897 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Mark blocks ending in a throw as cold so they can be moved out of line. */
4901 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 * methods the token indexes the wrapper's own data rather than metadata.
 * Methods on open constructed types are allowed in the result.
 */
4910 static inline MonoMethod *
4911 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4915 if (m->wrapper_type != MONO_WRAPPER_NONE)
4916 return mono_method_get_wrapper_data (m, token);
4918 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally detects a method
 * whose class is an open constructed type when not compiling shared
 * generic code (the condition below guards that case).
 */
4923 static inline MonoMethod *
4924 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4926 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4928 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  Wrappers store
 * the class directly in their wrapper data.  The class is initialized
 * before being used.
 */
4934 static inline MonoClass*
4935 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4939 if (method->wrapper_type != MONO_WRAPPER_NONE)
4940 klass = mono_method_get_wrapper_data (method, token);
4942 klass = mono_class_get_full (method->klass->image, token, context);
4944 mono_class_init (klass);
4949 * Returns TRUE if the JIT should abort inlining because "callee"
4950 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for CALLER invoking CALLEE.  For an ECMA
 * link demand, code throwing a SecurityException is emitted before the
 * call; other failures are recorded on the cfg as a pending
 * SECURITY_LINKDEMAND exception (unless an earlier exception is pending).
 */
4953 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Declarative security only matters when the caller is an inlinee here. */
4957 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4961 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4962 if (result == MONO_JIT_SECURITY_OK)
4965 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4966 /* Generate code to throw a SecurityException before the actual call/link */
4967 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4970 NEW_ICONST (cfg, args [0], 4);
4971 NEW_METHODCONST (cfg, args [1], caller);
4972 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4973 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4974 /* don't hide previous results */
4975 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4976 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (Exception) method,
 * looking it up once and caching it in a static.
 */
4984 throw_exception (void)
4986 static MonoMethod *method = NULL;
4989 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4990 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () passing the
 * pre-created exception object EX as a pointer constant.
 */
4997 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4999 MonoMethod *thrower = throw_exception ();
5002 EMIT_NEW_PCONST (cfg, args [0], ex);
5003 mono_emit_method_call (cfg, thrower, args, NULL);
5007 * Return the original method if a wrapper is specified. We can only access
5008 * the custom attributes from the original method.
5011 get_original_method (MonoMethod *method)
/* Not a wrapper: the method is already the original. */
5013 if (method->wrapper_type == MONO_WRAPPER_NONE)
5016 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5017 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5020 /* in other cases we need to find the original method */
5021 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method)
 * is not allowed to access FIELD, emit code throwing the exception that
 * the security layer returned.
 */
5025 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5026 MonoBasicBlock *bblock, unsigned char *ip)
5028 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5029 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5031 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method)
 * is not allowed to call CALLEE, emit code throwing the exception that
 * the security layer returned.
 */
5035 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5036 MonoBasicBlock *bblock, unsigned char *ip)
5038 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5039 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5041 emit_throw_exception (cfg, ex);
5045 * Check that the IL instructions at ip are the array initialization
5046 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the "dup; ldtoken <fld>; call RuntimeHelpers.InitializeArray"
 * pattern following a newarr and, when the element type allows a raw copy,
 * return a pointer to the static field's data blob (or its RVA when AOT
 * compiling a non-dynamic image).  *OUT_SIZE and *OUT_FIELD_TOKEN are
 * filled in for the caller.
 */
5049 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5052 * newarr[System.Int32]
5054 * ldtoken field valuetype ...
5055 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match the exact byte pattern: dup; ldtoken <field token>; call <method token>. */
5057 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5058 guint32 token = read32 (ip + 7);
5059 guint32 field_token = read32 (ip + 2);
5060 guint32 field_index = field_token & 0xffffff;
5062 const char *data_ptr;
5064 MonoMethod *cmethod;
5065 MonoClass *dummy_class;
5066 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5072 *out_field_token = field_token;
5074 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray intrinsic qualifies. */
5077 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Decide, per element type, whether the raw blob can be copied verbatim. */
5079 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5080 case MONO_TYPE_BOOLEAN:
5084 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5085 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5086 case MONO_TYPE_CHAR:
5096 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the array being initialized. */
5106 if (size > mono_type_size (field->type, &dummy_align))
5109 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5110 if (!method->klass->image->dynamic) {
5111 field_index = read32 (ip + 2) & 0xffffff;
5112 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5113 data_ptr = mono_image_rva_map (method->klass->image, rva);
5114 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5115 /* for aot code we do the lookup on load */
5116 if (aot && data_ptr)
5117 return GUINT_TO_POINTER (rva);
5119 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the blob directly from the field. */
5121 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg, with a message naming
 * METHOD and disassembling the offending instruction at IP (or noting
 * that the method body is empty).
 */
5129 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5131 char *method_fname = mono_method_full_name (method, TRUE);
5133 MonoMethodHeader *header = mono_method_get_header (method);
5135 if (header->code_size == 0)
5136 method_code = g_strdup ("method body is empty.");
5138 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5139 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5140 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5141 g_free (method_fname);
5142 g_free (method_code);
/* Header ownership passes to the cfg; it is freed with the other headers. */
5143 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on the cfg, registering the slot
 * as a GC root so the object stays alive until the JIT reports it.
 */
5147 set_exception_object (MonoCompile *cfg, MonoException *exception)
5149 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5150 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5151 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Whether KLASS is a reference type, resolving type variables through
 * the generic sharing context when compiling shared generic code.
 */
5155 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5159 if (cfg->generic_sharing_context)
5160 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5162 type = &klass->byval_arg;
5163 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N.  When the store would be a plain
 * register move whose source is an ICONST/I8CONST that is also the last
 * emitted instruction, the constant is retargeted to the local's register
 * instead of emitting a separate move.
 */
5167 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5170 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5171 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5172 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5173 /* Optimize reg-reg moves away */
5175 * Can't optimize other opcodes, since sp[0] might point to
5176 * the last ins of a decomposed opcode.
5178 sp [0]->dreg = (cfg)->locals [n]->dreg;
5180 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5185 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for the "ldloca <n>; initobj <type>" sequence: instead of
 * taking the local's address, store NULL (for reference types) or emit a
 * VZERO (for value types) directly into the local.  The initobj must lie
 * in the same bblock for the transformation to be valid.
 */
5188 static inline unsigned char *
5189 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5198 local = read16 (ip + 2);
/* Look for a following "prefix1; initobj" still inside the current bblock. */
5202 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5203 gboolean skip = FALSE;
5205 /* From the INITOBJ case */
5206 token = read32 (ip + 2);
5207 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5208 CHECK_TYPELOAD (klass);
5209 if (generic_class_is_reference_type (cfg, klass)) {
5210 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5211 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5212 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5213 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5214 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* Whether CLASS is System.Exception or derives from it (walks up the parent chain). */
5227 is_exception_class (MonoClass *class)
5230 if (class == mono_defaults.exception_class)
5232 class = class->parent;
5238 * mono_method_to_ir:
5240 * Translate the .net IL into linear IR.
5243 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5244 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5245 guint inline_offset, gboolean is_virtual_call)
5248 MonoInst *ins, **sp, **stack_start;
5249 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5250 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5251 MonoMethod *cmethod, *method_definition;
5252 MonoInst **arg_array;
5253 MonoMethodHeader *header;
5255 guint32 token, ins_flag;
5257 MonoClass *constrained_call = NULL;
5258 unsigned char *ip, *end, *target, *err_pos;
5259 static double r8_0 = 0.0;
5260 MonoMethodSignature *sig;
5261 MonoGenericContext *generic_context = NULL;
5262 MonoGenericContainer *generic_container = NULL;
5263 MonoType **param_types;
5264 int i, n, start_new_bblock, dreg;
5265 int num_calls = 0, inline_costs = 0;
5266 int breakpoint_id = 0;
5268 MonoBoolean security, pinvoke;
5269 MonoSecurityManager* secman = NULL;
5270 MonoDeclSecurityActions actions;
5271 GSList *class_inits = NULL;
5272 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5274 gboolean init_locals, seq_points, skip_dead_blocks;
5276 /* serialization and xdomain stuff may need access to private fields and methods */
5277 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5278 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5279 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5280 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5281 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5282 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5284 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5286 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5287 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5288 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5289 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5291 image = method->klass->image;
5292 header = mono_method_get_header (method);
5294 MonoLoaderError *error;
5296 if ((error = mono_loader_get_last_error ())) {
5297 cfg->exception_type = error->exception_type;
5299 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5300 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5302 goto exception_exit;
5304 generic_container = mono_method_get_generic_container (method);
5305 sig = mono_method_signature (method);
5306 num_args = sig->hasthis + sig->param_count;
5307 ip = (unsigned char*)header->code;
5308 cfg->cil_start = ip;
5309 end = ip + header->code_size;
5310 mono_jit_stats.cil_code_size += header->code_size;
5311 init_locals = header->init_locals;
5313 seq_points = cfg->gen_seq_points && cfg->method == method;
5316 * Methods without init_locals set could cause asserts in various passes
5321 method_definition = method;
5322 while (method_definition->is_inflated) {
5323 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5324 method_definition = imethod->declaring;
5327 /* SkipVerification is not allowed if core-clr is enabled */
5328 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5330 dont_verify_stloc = TRUE;
5333 if (!dont_verify && mini_method_verify (cfg, method_definition))
5334 goto exception_exit;
5336 if (mono_debug_using_mono_debugger ())
5337 cfg->keep_cil_nops = TRUE;
5339 if (sig->is_inflated)
5340 generic_context = mono_method_get_context (method);
5341 else if (generic_container)
5342 generic_context = &generic_container->context;
5343 cfg->generic_context = generic_context;
5345 if (!cfg->generic_sharing_context)
5346 g_assert (!sig->has_type_parameters);
5348 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5349 g_assert (method->is_inflated);
5350 g_assert (mono_method_get_context (method)->method_inst);
5352 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5353 g_assert (sig->generic_param_count);
5355 if (cfg->method == method) {
5356 cfg->real_offset = 0;
5358 cfg->real_offset = inline_offset;
5361 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5362 cfg->cil_offset_to_bb_len = header->code_size;
5364 cfg->current_method = method;
5366 if (cfg->verbose_level > 2)
5367 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5369 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5371 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5372 for (n = 0; n < sig->param_count; ++n)
5373 param_types [n + sig->hasthis] = sig->params [n];
5374 cfg->arg_types = param_types;
5376 dont_inline = g_list_prepend (dont_inline, method);
5377 if (cfg->method == method) {
5379 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5380 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5383 NEW_BBLOCK (cfg, start_bblock);
5384 cfg->bb_entry = start_bblock;
5385 start_bblock->cil_code = NULL;
5386 start_bblock->cil_length = 0;
5389 NEW_BBLOCK (cfg, end_bblock);
5390 cfg->bb_exit = end_bblock;
5391 end_bblock->cil_code = NULL;
5392 end_bblock->cil_length = 0;
5393 g_assert (cfg->num_bblocks == 2);
5395 arg_array = cfg->args;
5397 if (header->num_clauses) {
5398 cfg->spvars = g_hash_table_new (NULL, NULL);
5399 cfg->exvars = g_hash_table_new (NULL, NULL);
5401 /* handle exception clauses */
5402 for (i = 0; i < header->num_clauses; ++i) {
5403 MonoBasicBlock *try_bb;
5404 MonoExceptionClause *clause = &header->clauses [i];
5405 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5406 try_bb->real_offset = clause->try_offset;
5407 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5408 tblock->real_offset = clause->handler_offset;
5409 tblock->flags |= BB_EXCEPTION_HANDLER;
5411 link_bblock (cfg, try_bb, tblock);
5413 if (*(ip + clause->handler_offset) == CEE_POP)
5414 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5416 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5417 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5418 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5419 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5420 MONO_ADD_INS (tblock, ins);
5422 /* todo: is a fault block unsafe to optimize? */
5423 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5424 tblock->flags |= BB_EXCEPTION_UNSAFE;
5428 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5430 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5432 /* catch and filter blocks get the exception object on the stack */
5433 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5434 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5435 MonoInst *dummy_use;
5437 /* mostly like handle_stack_args (), but just sets the input args */
5438 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5439 tblock->in_scount = 1;
5440 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5441 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5444 * Add a dummy use for the exvar so its liveness info will be
5448 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5450 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5451 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5452 tblock->flags |= BB_EXCEPTION_HANDLER;
5453 tblock->real_offset = clause->data.filter_offset;
5454 tblock->in_scount = 1;
5455 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5456 /* The filter block shares the exvar with the handler block */
5457 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5458 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5459 MONO_ADD_INS (tblock, ins);
5463 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5464 clause->data.catch_class &&
5465 cfg->generic_sharing_context &&
5466 mono_class_check_context_used (clause->data.catch_class)) {
5468 * In shared generic code with catch
5469 * clauses containing type variables
5470 * the exception handling code has to
5471 * be able to get to the rgctx.
5472 * Therefore we have to make sure that
5473 * the vtable/mrgctx argument (for
5474 * static or generic methods) or the
5475 * "this" argument (for non-static
5476 * methods) are live.
5478 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5479 mini_method_get_context (method)->method_inst ||
5480 method->klass->valuetype) {
5481 mono_get_vtable_var (cfg);
5483 MonoInst *dummy_use;
5485 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5490 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5491 cfg->cbb = start_bblock;
5492 cfg->args = arg_array;
5493 mono_save_args (cfg, sig, inline_args);
5496 /* FIRST CODE BLOCK */
5497 NEW_BBLOCK (cfg, bblock);
5498 bblock->cil_code = ip;
5502 ADD_BBLOCK (cfg, bblock);
5504 if (cfg->method == method) {
5505 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5506 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5507 MONO_INST_NEW (cfg, ins, OP_BREAK);
5508 MONO_ADD_INS (bblock, ins);
5512 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5513 secman = mono_security_manager_get_methods ();
5515 security = (secman && mono_method_has_declsec (method));
5516 /* at this point having security doesn't mean we have any code to generate */
5517 if (security && (cfg->method == method)) {
5518 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5519 * And we do not want to enter the next section (with allocation) if we
5520 * have nothing to generate */
5521 security = mono_declsec_get_demands (method, &actions);
5524 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5525 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5527 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5528 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5529 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5531 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5532 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5536 mono_custom_attrs_free (custom);
5539 custom = mono_custom_attrs_from_class (wrapped->klass);
5540 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5544 mono_custom_attrs_free (custom);
5547 /* not a P/Invoke after all */
5552 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5553 /* we use a separate basic block for the initialization code */
5554 NEW_BBLOCK (cfg, init_localsbb);
5555 cfg->bb_init = init_localsbb;
5556 init_localsbb->real_offset = cfg->real_offset;
5557 start_bblock->next_bb = init_localsbb;
5558 init_localsbb->next_bb = bblock;
5559 link_bblock (cfg, start_bblock, init_localsbb);
5560 link_bblock (cfg, init_localsbb, bblock);
5562 cfg->cbb = init_localsbb;
5564 start_bblock->next_bb = bblock;
5565 link_bblock (cfg, start_bblock, bblock);
5568 /* at this point we know, if security is TRUE, that some code needs to be generated */
5569 if (security && (cfg->method == method)) {
5572 mono_jit_stats.cas_demand_generation++;
5574 if (actions.demand.blob) {
5575 /* Add code for SecurityAction.Demand */
5576 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5577 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5578 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5579 mono_emit_method_call (cfg, secman->demand, args, NULL);
5581 if (actions.noncasdemand.blob) {
5582 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5583 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5584 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5585 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5586 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5587 mono_emit_method_call (cfg, secman->demand, args, NULL);
5589 if (actions.demandchoice.blob) {
5590 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5591 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5592 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5593 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5594 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5598 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5600 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5603 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5604 /* check if this is native code, e.g. an icall or a p/invoke */
5605 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5606 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5608 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5609 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5611 /* if this is a native call then it can only be JITted from platform code */
5612 if ((icall || pinvk) && method->klass && method->klass->image) {
5613 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5614 MonoException *ex = icall ? mono_get_exception_security () :
5615 mono_get_exception_method_access ();
5616 emit_throw_exception (cfg, ex);
5623 if (header->code_size == 0)
5626 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5631 if (cfg->method == method)
5632 mono_debug_init_method (cfg, bblock, breakpoint_id);
5634 for (n = 0; n < header->num_locals; ++n) {
5635 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5640 /* We force the vtable variable here for all shared methods
5641 for the possibility that they might show up in a stack
5642 trace where their exact instantiation is needed. */
5643 if (cfg->generic_sharing_context && method == cfg->method) {
5644 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5645 mini_method_get_context (method)->method_inst ||
5646 method->klass->valuetype) {
5647 mono_get_vtable_var (cfg);
5649 /* FIXME: Is there a better way to do this?
5650 We need the variable live for the duration
5651 of the whole method. */
5652 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5656 /* add a check for this != NULL to inlined methods */
5657 if (is_virtual_call) {
5660 NEW_ARGLOAD (cfg, arg_ins, 0);
5661 MONO_ADD_INS (cfg->cbb, arg_ins);
5662 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5665 skip_dead_blocks = !dont_verify;
5666 if (skip_dead_blocks) {
5667 original_bb = bb = mono_basic_block_split (method, &error);
5668 if (!mono_error_ok (&error)) {
5669 mono_error_cleanup (&error);
5675 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5676 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5679 start_new_bblock = 0;
5682 if (cfg->method == method)
5683 cfg->real_offset = ip - header->code;
5685 cfg->real_offset = inline_offset;
5690 if (start_new_bblock) {
5691 bblock->cil_length = ip - bblock->cil_code;
5692 if (start_new_bblock == 2) {
5693 g_assert (ip == tblock->cil_code);
5695 GET_BBLOCK (cfg, tblock, ip);
5697 bblock->next_bb = tblock;
5700 start_new_bblock = 0;
5701 for (i = 0; i < bblock->in_scount; ++i) {
5702 if (cfg->verbose_level > 3)
5703 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5704 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5708 g_slist_free (class_inits);
5711 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5712 link_bblock (cfg, bblock, tblock);
5713 if (sp != stack_start) {
5714 handle_stack_args (cfg, stack_start, sp - stack_start);
5716 CHECK_UNVERIFIABLE (cfg);
5718 bblock->next_bb = tblock;
5721 for (i = 0; i < bblock->in_scount; ++i) {
5722 if (cfg->verbose_level > 3)
5723 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5724 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5727 g_slist_free (class_inits);
5732 if (skip_dead_blocks) {
5733 int ip_offset = ip - header->code;
5735 if (ip_offset == bb->end)
5739 int op_size = mono_opcode_size (ip, end);
5740 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5742 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5744 if (ip_offset + op_size == bb->end) {
5745 MONO_INST_NEW (cfg, ins, OP_NOP);
5746 MONO_ADD_INS (bblock, ins);
5747 start_new_bblock = 1;
5755 * Sequence points are points where the debugger can place a breakpoint.
5756 * Currently, we generate these automatically at points where the IL
5759 if (seq_points && sp == stack_start) {
5760 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5761 MONO_ADD_INS (cfg->cbb, ins);
5764 bblock->real_offset = cfg->real_offset;
5766 if ((cfg->method == method) && cfg->coverage_info) {
5767 guint32 cil_offset = ip - header->code;
5768 cfg->coverage_info->data [cil_offset].cil_code = ip;
5770 /* TODO: Use an increment here */
5771 #if defined(TARGET_X86)
5772 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5773 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5775 MONO_ADD_INS (cfg->cbb, ins);
5777 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5778 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5782 if (cfg->verbose_level > 3)
5783 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5787 if (cfg->keep_cil_nops)
5788 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5790 MONO_INST_NEW (cfg, ins, OP_NOP);
5792 MONO_ADD_INS (bblock, ins);
5795 if (should_insert_brekpoint (cfg->method))
5796 MONO_INST_NEW (cfg, ins, OP_BREAK);
5798 MONO_INST_NEW (cfg, ins, OP_NOP);
5800 MONO_ADD_INS (bblock, ins);
5806 CHECK_STACK_OVF (1);
5807 n = (*ip)-CEE_LDARG_0;
5809 EMIT_NEW_ARGLOAD (cfg, ins, n);
5817 CHECK_STACK_OVF (1);
5818 n = (*ip)-CEE_LDLOC_0;
5820 EMIT_NEW_LOCLOAD (cfg, ins, n);
5829 n = (*ip)-CEE_STLOC_0;
5832 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5834 emit_stloc_ir (cfg, sp, header, n);
5841 CHECK_STACK_OVF (1);
5844 EMIT_NEW_ARGLOAD (cfg, ins, n);
5850 CHECK_STACK_OVF (1);
5853 NEW_ARGLOADA (cfg, ins, n);
5854 MONO_ADD_INS (cfg->cbb, ins);
5864 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5866 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5871 CHECK_STACK_OVF (1);
5874 EMIT_NEW_LOCLOAD (cfg, ins, n);
5878 case CEE_LDLOCA_S: {
5879 unsigned char *tmp_ip;
5881 CHECK_STACK_OVF (1);
5882 CHECK_LOCAL (ip [1]);
5884 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5890 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5899 CHECK_LOCAL (ip [1]);
5900 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5902 emit_stloc_ir (cfg, sp, header, ip [1]);
5907 CHECK_STACK_OVF (1);
5908 EMIT_NEW_PCONST (cfg, ins, NULL);
5909 ins->type = STACK_OBJ;
5914 CHECK_STACK_OVF (1);
5915 EMIT_NEW_ICONST (cfg, ins, -1);
5928 CHECK_STACK_OVF (1);
5929 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5935 CHECK_STACK_OVF (1);
5937 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5943 CHECK_STACK_OVF (1);
5944 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5950 CHECK_STACK_OVF (1);
5951 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5952 ins->type = STACK_I8;
5953 ins->dreg = alloc_dreg (cfg, STACK_I8);
5955 ins->inst_l = (gint64)read64 (ip);
5956 MONO_ADD_INS (bblock, ins);
5962 gboolean use_aotconst = FALSE;
5964 #ifdef TARGET_POWERPC
5965 /* FIXME: Clean this up */
5966 if (cfg->compile_aot)
5967 use_aotconst = TRUE;
5970 /* FIXME: we should really allocate this only late in the compilation process */
5971 f = mono_domain_alloc (cfg->domain, sizeof (float));
5973 CHECK_STACK_OVF (1);
5979 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5981 dreg = alloc_freg (cfg);
5982 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5983 ins->type = STACK_R8;
5985 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5986 ins->type = STACK_R8;
5987 ins->dreg = alloc_dreg (cfg, STACK_R8);
5989 MONO_ADD_INS (bblock, ins);
5999 gboolean use_aotconst = FALSE;
6001 #ifdef TARGET_POWERPC
6002 /* FIXME: Clean this up */
6003 if (cfg->compile_aot)
6004 use_aotconst = TRUE;
6007 /* FIXME: we should really allocate this only late in the compilation process */
6008 d = mono_domain_alloc (cfg->domain, sizeof (double));
6010 CHECK_STACK_OVF (1);
6016 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6018 dreg = alloc_freg (cfg);
6019 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6020 ins->type = STACK_R8;
6022 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6023 ins->type = STACK_R8;
6024 ins->dreg = alloc_dreg (cfg, STACK_R8);
6026 MONO_ADD_INS (bblock, ins);
6035 MonoInst *temp, *store;
6037 CHECK_STACK_OVF (1);
6041 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6042 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6044 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6047 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6060 if (sp [0]->type == STACK_R8)
6061 /* we need to pop the value from the x86 FP stack */
6062 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6071 if (stack_start != sp)
6073 token = read32 (ip + 1);
6074 /* FIXME: check the signature matches */
6075 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6080 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6081 GENERIC_SHARING_FAILURE (CEE_JMP);
6083 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6084 CHECK_CFG_EXCEPTION;
6086 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6088 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6091 /* Handle tail calls similarly to calls */
6092 n = fsig->param_count + fsig->hasthis;
6094 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6095 call->method = cmethod;
6096 call->tail_call = TRUE;
6097 call->signature = mono_method_signature (cmethod);
6098 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6099 call->inst.inst_p0 = cmethod;
6100 for (i = 0; i < n; ++i)
6101 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6103 mono_arch_emit_call (cfg, call);
6104 MONO_ADD_INS (bblock, (MonoInst*)call);
6107 for (i = 0; i < num_args; ++i)
6108 /* Prevent arguments from being optimized away */
6109 arg_array [i]->flags |= MONO_INST_VOLATILE;
6111 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6112 ins = (MonoInst*)call;
6113 ins->inst_p0 = cmethod;
6114 MONO_ADD_INS (bblock, ins);
6118 start_new_bblock = 1;
6123 case CEE_CALLVIRT: {
6124 MonoInst *addr = NULL;
6125 MonoMethodSignature *fsig = NULL;
6127 int virtual = *ip == CEE_CALLVIRT;
6128 int calli = *ip == CEE_CALLI;
6129 gboolean pass_imt_from_rgctx = FALSE;
6130 MonoInst *imt_arg = NULL;
6131 gboolean pass_vtable = FALSE;
6132 gboolean pass_mrgctx = FALSE;
6133 MonoInst *vtable_arg = NULL;
6134 gboolean check_this = FALSE;
6135 gboolean supported_tail_call = FALSE;
6138 token = read32 (ip + 1);
6145 if (method->wrapper_type != MONO_WRAPPER_NONE)
6146 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6148 fsig = mono_metadata_parse_signature (image, token);
6150 n = fsig->param_count + fsig->hasthis;
6152 if (method->dynamic && fsig->pinvoke) {
6156 * This is a call through a function pointer using a pinvoke
6157 * signature. Have to create a wrapper and call that instead.
6158 * FIXME: This is very slow, need to create a wrapper at JIT time
6159 * instead based on the signature.
6161 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6162 EMIT_NEW_PCONST (cfg, args [1], fsig);
6164 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6167 MonoMethod *cil_method;
6169 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6170 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6171 cil_method = cmethod;
6172 } else if (constrained_call) {
6173 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6175 * This is needed since get_method_constrained can't find
6176 * the method in klass representing a type var.
6177 * The type var is guaranteed to be a reference type in this
6180 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6181 cil_method = cmethod;
6182 g_assert (!cmethod->klass->valuetype);
6184 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6187 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6188 cil_method = cmethod;
6193 if (!dont_verify && !cfg->skip_visibility) {
6194 MonoMethod *target_method = cil_method;
6195 if (method->is_inflated) {
6196 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6198 if (!mono_method_can_access_method (method_definition, target_method) &&
6199 !mono_method_can_access_method (method, cil_method))
6200 METHOD_ACCESS_FAILURE;
6203 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6204 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6206 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6207 /* MS.NET seems to silently convert this to a callvirt */
6212 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6213 * converts to a callvirt.
6215 * tests/bug-515884.il is an example of this behavior
6217 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6218 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6219 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6223 if (!cmethod->klass->inited)
6224 if (!mono_class_init (cmethod->klass))
6227 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6228 mini_class_is_system_array (cmethod->klass)) {
6229 array_rank = cmethod->klass->rank;
6230 fsig = mono_method_signature (cmethod);
6232 fsig = mono_method_signature (cmethod);
6237 if (fsig->pinvoke) {
6238 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6239 check_for_pending_exc, FALSE);
6240 fsig = mono_method_signature (wrapper);
6241 } else if (constrained_call) {
6242 fsig = mono_method_signature (cmethod);
6244 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6248 mono_save_token_info (cfg, image, token, cil_method);
6250 n = fsig->param_count + fsig->hasthis;
6252 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6253 if (check_linkdemand (cfg, method, cmethod))
6255 CHECK_CFG_EXCEPTION;
6258 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6259 g_assert_not_reached ();
6262 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6265 if (!cfg->generic_sharing_context && cmethod)
6266 g_assert (!mono_method_check_context_used (cmethod));
6270 //g_assert (!virtual || fsig->hasthis);
6274 if (constrained_call) {
6276 * We have the `constrained.' prefix opcode.
6278 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6280 * The type parameter is instantiated as a valuetype,
6281 * but that type doesn't override the method we're
6282 * calling, so we need to box `this'.
6284 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6285 ins->klass = constrained_call;
6286 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6287 CHECK_CFG_EXCEPTION;
6288 } else if (!constrained_call->valuetype) {
6289 int dreg = alloc_preg (cfg);
6292 * The type parameter is instantiated as a reference
6293 * type. We have a managed pointer on the stack, so
6294 * we need to dereference it here.
6296 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6297 ins->type = STACK_OBJ;
6299 } else if (cmethod->klass->valuetype)
6301 constrained_call = NULL;
6304 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6308 * If the callee is a shared method, then its static cctor
6309 * might not get called after the call was patched.
6311 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6312 emit_generic_class_init (cfg, cmethod->klass);
6313 CHECK_TYPELOAD (cmethod->klass);
6316 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6317 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6319 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6323 * Pass vtable iff target method might
6324 * be shared, which means that sharing
6325 * is enabled for its class and its
6326 * context is sharable (and it's not a
6329 if (sharing_enabled && context_sharable &&
6330 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6334 if (cmethod && mini_method_get_context (cmethod) &&
6335 mini_method_get_context (cmethod)->method_inst) {
6336 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6337 MonoGenericContext *context = mini_method_get_context (cmethod);
6338 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6340 g_assert (!pass_vtable);
6342 if (sharing_enabled && context_sharable)
6346 if (cfg->generic_sharing_context && cmethod) {
6347 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6349 context_used = mono_method_check_context_used (cmethod);
6351 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6352 /* Generic method interface
6353 calls are resolved via a
6354 helper function and don't
6356 if (!cmethod_context || !cmethod_context->method_inst)
6357 pass_imt_from_rgctx = TRUE;
6361 * If a shared method calls another
6362 * shared method then the caller must
6363 * have a generic sharing context
6364 * because the magic trampoline
6365 * requires it. FIXME: We shouldn't
6366 * have to force the vtable/mrgctx
6367 * variable here. Instead there
6368 * should be a flag in the cfg to
6369 * request a generic sharing context.
6372 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6373 mono_get_vtable_var (cfg);
6378 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6380 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6382 CHECK_TYPELOAD (cmethod->klass);
6383 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6388 g_assert (!vtable_arg);
6390 if (!cfg->compile_aot) {
6392 * emit_get_rgctx_method () calls mono_class_vtable () so check
6393 * for type load errors before.
6395 mono_class_setup_vtable (cmethod->klass);
6396 CHECK_TYPELOAD (cmethod->klass);
6399 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6401 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6402 MONO_METHOD_IS_FINAL (cmethod)) {
6409 if (pass_imt_from_rgctx) {
6410 g_assert (!pass_vtable);
6413 imt_arg = emit_get_rgctx_method (cfg, context_used,
6414 cmethod, MONO_RGCTX_INFO_METHOD);
6418 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6420 /* Calling virtual generic methods */
6421 if (cmethod && virtual &&
6422 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6423 !(MONO_METHOD_IS_FINAL (cmethod) &&
6424 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6425 mono_method_signature (cmethod)->generic_param_count) {
6426 MonoInst *this_temp, *this_arg_temp, *store;
6427 MonoInst *iargs [4];
6429 g_assert (mono_method_signature (cmethod)->is_inflated);
6431 /* Prevent inlining of methods that contain indirect calls */
6434 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6435 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6436 g_assert (!imt_arg);
6438 g_assert (cmethod->is_inflated);
6439 imt_arg = emit_get_rgctx_method (cfg, context_used,
6440 cmethod, MONO_RGCTX_INFO_METHOD);
6441 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6445 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6446 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6447 MONO_ADD_INS (bblock, store);
6449 /* FIXME: This should be a managed pointer */
6450 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6452 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6453 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6454 cmethod, MONO_RGCTX_INFO_METHOD);
6455 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6456 addr = mono_emit_jit_icall (cfg,
6457 mono_helper_compile_generic_method, iargs);
6459 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6461 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6464 if (!MONO_TYPE_IS_VOID (fsig->ret))
6465 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6467 CHECK_CFG_EXCEPTION;
6474 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6475 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6477 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6481 /* FIXME: runtime generic context pointer for jumps? */
6482 /* FIXME: handle this for generic sharing eventually */
6483 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6486 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6489 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6490 /* Handle tail calls similarly to calls */
6491 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6493 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6494 call->tail_call = TRUE;
6495 call->method = cmethod;
6496 call->signature = mono_method_signature (cmethod);
6499 * We implement tail calls by storing the actual arguments into the
6500 * argument variables, then emitting a CEE_JMP.
6502 for (i = 0; i < n; ++i) {
6503 /* Prevent argument from being register allocated */
6504 arg_array [i]->flags |= MONO_INST_VOLATILE;
6505 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6509 ins = (MonoInst*)call;
6510 ins->inst_p0 = cmethod;
6511 ins->inst_p1 = arg_array [0];
6512 MONO_ADD_INS (bblock, ins);
6513 link_bblock (cfg, bblock, end_bblock);
6514 start_new_bblock = 1;
6516 CHECK_CFG_EXCEPTION;
6518 /* skip CEE_RET as well */
6524 /* Conversion to a JIT intrinsic */
6525 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6526 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6527 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6532 CHECK_CFG_EXCEPTION;
6540 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6541 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6542 mono_method_check_inlining (cfg, cmethod) &&
6543 !g_list_find (dont_inline, cmethod)) {
6545 gboolean allways = FALSE;
6547 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6548 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6549 /* Prevent inlining of methods that call wrappers */
6551 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6555 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6557 cfg->real_offset += 5;
6560 if (!MONO_TYPE_IS_VOID (fsig->ret))
6561 /* *sp is already set by inline_method */
6564 inline_costs += costs;
6570 inline_costs += 10 * num_calls++;
6572 /* Tail recursion elimination */
6573 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6574 gboolean has_vtargs = FALSE;
6577 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6580 /* keep it simple */
6581 for (i = fsig->param_count - 1; i >= 0; i--) {
6582 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6587 for (i = 0; i < n; ++i)
6588 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6589 MONO_INST_NEW (cfg, ins, OP_BR);
6590 MONO_ADD_INS (bblock, ins);
6591 tblock = start_bblock->out_bb [0];
6592 link_bblock (cfg, bblock, tblock);
6593 ins->inst_target_bb = tblock;
6594 start_new_bblock = 1;
6596 /* skip the CEE_RET, too */
6597 if (ip_in_bb (cfg, bblock, ip + 5))
6607 /* Generic sharing */
6608 /* FIXME: only do this for generic methods if
6609 they are not shared! */
6610 if (context_used && !imt_arg && !array_rank &&
6611 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6612 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6613 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6614 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6617 g_assert (cfg->generic_sharing_context && cmethod);
6621 * We are compiling a call to a
6622 * generic method from shared code,
6623 * which means that we have to look up
6624 * the method in the rgctx and do an
6627 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6630 /* Indirect calls */
6632 g_assert (!imt_arg);
6634 if (*ip == CEE_CALL)
6635 g_assert (context_used);
6636 else if (*ip == CEE_CALLI)
6637 g_assert (!vtable_arg);
6639 /* FIXME: what the hell is this??? */
6640 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6641 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6643 /* Prevent inlining of methods with indirect calls */
6648 int rgctx_reg = mono_alloc_preg (cfg);
6650 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6651 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6652 call = (MonoCallInst*)ins;
6653 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6655 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6657 * Instead of emitting an indirect call, emit a direct call
6658 * with the contents of the aotconst as the patch info.
6660 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6662 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6663 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6666 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6669 if (!MONO_TYPE_IS_VOID (fsig->ret))
6670 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6672 CHECK_CFG_EXCEPTION;
6683 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6684 if (sp [fsig->param_count]->type == STACK_OBJ) {
6685 MonoInst *iargs [2];
6688 iargs [1] = sp [fsig->param_count];
6690 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6693 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6694 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6695 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6696 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6698 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6701 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6702 if (!cmethod->klass->element_class->valuetype && !readonly)
6703 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6704 CHECK_TYPELOAD (cmethod->klass);
6707 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6710 g_assert_not_reached ();
6713 CHECK_CFG_EXCEPTION;
6720 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6722 if (!MONO_TYPE_IS_VOID (fsig->ret))
6723 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6725 CHECK_CFG_EXCEPTION;
6735 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6737 } else if (imt_arg) {
6738 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6740 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6743 if (!MONO_TYPE_IS_VOID (fsig->ret))
6744 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6746 CHECK_CFG_EXCEPTION;
6753 if (cfg->method != method) {
6754 /* return from inlined method */
6756 * If in_count == 0, that means the ret is unreachable due to
6757 * being preceded by a throw. In that case, inline_method () will
6758 * handle setting the return value
6759 * (test case: test_0_inline_throw ()).
6761 if (return_var && cfg->cbb->in_count) {
6765 //g_assert (returnvar != -1);
6766 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6767 cfg->ret_var_set = TRUE;
6771 MonoType *ret_type = mono_method_signature (method)->ret;
6775 * Place a seq point here too even through the IL stack is not
6776 * empty, so a step over on
6779 * will work correctly.
6781 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6782 MONO_ADD_INS (cfg->cbb, ins);
6785 g_assert (!return_var);
6788 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6791 if (!cfg->vret_addr) {
6794 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6796 EMIT_NEW_RETLOADA (cfg, ret_addr);
6798 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6799 ins->klass = mono_class_from_mono_type (ret_type);
6802 #ifdef MONO_ARCH_SOFT_FLOAT
6803 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6804 MonoInst *iargs [1];
6808 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6809 mono_arch_emit_setret (cfg, method, conv);
6811 mono_arch_emit_setret (cfg, method, *sp);
6814 mono_arch_emit_setret (cfg, method, *sp);
6819 if (sp != stack_start)
6821 MONO_INST_NEW (cfg, ins, OP_BR);
6823 ins->inst_target_bb = end_bblock;
6824 MONO_ADD_INS (bblock, ins);
6825 link_bblock (cfg, bblock, end_bblock);
6826 start_new_bblock = 1;
6830 MONO_INST_NEW (cfg, ins, OP_BR);
6832 target = ip + 1 + (signed char)(*ip);
6834 GET_BBLOCK (cfg, tblock, target);
6835 link_bblock (cfg, bblock, tblock);
6836 ins->inst_target_bb = tblock;
6837 if (sp != stack_start) {
6838 handle_stack_args (cfg, stack_start, sp - stack_start);
6840 CHECK_UNVERIFIABLE (cfg);
6842 MONO_ADD_INS (bblock, ins);
6843 start_new_bblock = 1;
6844 inline_costs += BRANCH_COST;
6858 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6860 target = ip + 1 + *(signed char*)ip;
6866 inline_costs += BRANCH_COST;
6870 MONO_INST_NEW (cfg, ins, OP_BR);
6873 target = ip + 4 + (gint32)read32(ip);
6875 GET_BBLOCK (cfg, tblock, target);
6876 link_bblock (cfg, bblock, tblock);
6877 ins->inst_target_bb = tblock;
6878 if (sp != stack_start) {
6879 handle_stack_args (cfg, stack_start, sp - stack_start);
6881 CHECK_UNVERIFIABLE (cfg);
6884 MONO_ADD_INS (bblock, ins);
6886 start_new_bblock = 1;
6887 inline_costs += BRANCH_COST;
6894 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6895 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6896 guint32 opsize = is_short ? 1 : 4;
6898 CHECK_OPSIZE (opsize);
6900 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6903 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6908 GET_BBLOCK (cfg, tblock, target);
6909 link_bblock (cfg, bblock, tblock);
6910 GET_BBLOCK (cfg, tblock, ip);
6911 link_bblock (cfg, bblock, tblock);
6913 if (sp != stack_start) {
6914 handle_stack_args (cfg, stack_start, sp - stack_start);
6915 CHECK_UNVERIFIABLE (cfg);
6918 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6919 cmp->sreg1 = sp [0]->dreg;
6920 type_from_op (cmp, sp [0], NULL);
6923 #if SIZEOF_REGISTER == 4
6924 if (cmp->opcode == OP_LCOMPARE_IMM) {
6925 /* Convert it to OP_LCOMPARE */
6926 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6927 ins->type = STACK_I8;
6928 ins->dreg = alloc_dreg (cfg, STACK_I8);
6930 MONO_ADD_INS (bblock, ins);
6931 cmp->opcode = OP_LCOMPARE;
6932 cmp->sreg2 = ins->dreg;
6935 MONO_ADD_INS (bblock, cmp);
6937 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6938 type_from_op (ins, sp [0], NULL);
6939 MONO_ADD_INS (bblock, ins);
6940 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6941 GET_BBLOCK (cfg, tblock, target);
6942 ins->inst_true_bb = tblock;
6943 GET_BBLOCK (cfg, tblock, ip);
6944 ins->inst_false_bb = tblock;
6945 start_new_bblock = 2;
6948 inline_costs += BRANCH_COST;
6963 MONO_INST_NEW (cfg, ins, *ip);
6965 target = ip + 4 + (gint32)read32(ip);
6971 inline_costs += BRANCH_COST;
6975 MonoBasicBlock **targets;
6976 MonoBasicBlock *default_bblock;
6977 MonoJumpInfoBBTable *table;
6978 int offset_reg = alloc_preg (cfg);
6979 int target_reg = alloc_preg (cfg);
6980 int table_reg = alloc_preg (cfg);
6981 int sum_reg = alloc_preg (cfg);
6982 gboolean use_op_switch;
6986 n = read32 (ip + 1);
6989 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6993 CHECK_OPSIZE (n * sizeof (guint32));
6994 target = ip + n * sizeof (guint32);
6996 GET_BBLOCK (cfg, default_bblock, target);
6998 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6999 for (i = 0; i < n; ++i) {
7000 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7001 targets [i] = tblock;
7005 if (sp != stack_start) {
7007 * Link the current bb with the targets as well, so handle_stack_args
7008 * will set their in_stack correctly.
7010 link_bblock (cfg, bblock, default_bblock);
7011 for (i = 0; i < n; ++i)
7012 link_bblock (cfg, bblock, targets [i]);
7014 handle_stack_args (cfg, stack_start, sp - stack_start);
7016 CHECK_UNVERIFIABLE (cfg);
7019 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7020 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7023 for (i = 0; i < n; ++i)
7024 link_bblock (cfg, bblock, targets [i]);
7026 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7027 table->table = targets;
7028 table->table_size = n;
7030 use_op_switch = FALSE;
7032 /* ARM implements SWITCH statements differently */
7033 /* FIXME: Make it use the generic implementation */
7034 if (!cfg->compile_aot)
7035 use_op_switch = TRUE;
7038 if (COMPILE_LLVM (cfg))
7039 use_op_switch = TRUE;
7041 cfg->cbb->has_jump_table = 1;
7043 if (use_op_switch) {
7044 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7045 ins->sreg1 = src1->dreg;
7046 ins->inst_p0 = table;
7047 ins->inst_many_bb = targets;
7048 ins->klass = GUINT_TO_POINTER (n);
7049 MONO_ADD_INS (cfg->cbb, ins);
7051 if (sizeof (gpointer) == 8)
7052 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7056 #if SIZEOF_REGISTER == 8
7057 /* The upper word might not be zero, and we add it to a 64 bit address later */
7058 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7061 if (cfg->compile_aot) {
7062 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7064 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7065 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7066 ins->inst_p0 = table;
7067 ins->dreg = table_reg;
7068 MONO_ADD_INS (cfg->cbb, ins);
7071 /* FIXME: Use load_memindex */
7072 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7074 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7076 start_new_bblock = 1;
7077 inline_costs += (BRANCH_COST * 2);
7097 dreg = alloc_freg (cfg);
7100 dreg = alloc_lreg (cfg);
7103 dreg = alloc_preg (cfg);
7106 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7107 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7108 ins->flags |= ins_flag;
7110 MONO_ADD_INS (bblock, ins);
7125 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7126 ins->flags |= ins_flag;
7128 MONO_ADD_INS (bblock, ins);
7130 #if HAVE_WRITE_BARRIERS
7131 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7132 MonoInst *dummy_use;
7133 /* insert call to write barrier */
7134 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7135 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7136 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7147 MONO_INST_NEW (cfg, ins, (*ip));
7149 ins->sreg1 = sp [0]->dreg;
7150 ins->sreg2 = sp [1]->dreg;
7151 type_from_op (ins, sp [0], sp [1]);
7153 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7155 /* Use the immediate opcodes if possible */
7156 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7157 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7158 if (imm_opcode != -1) {
7159 ins->opcode = imm_opcode;
7160 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7163 sp [1]->opcode = OP_NOP;
7167 MONO_ADD_INS ((cfg)->cbb, (ins));
7169 *sp++ = mono_decompose_opcode (cfg, ins);
7186 MONO_INST_NEW (cfg, ins, (*ip));
7188 ins->sreg1 = sp [0]->dreg;
7189 ins->sreg2 = sp [1]->dreg;
7190 type_from_op (ins, sp [0], sp [1]);
7192 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7193 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7195 /* FIXME: Pass opcode to is_inst_imm */
7197 /* Use the immediate opcodes if possible */
7198 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7201 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7202 if (imm_opcode != -1) {
7203 ins->opcode = imm_opcode;
7204 if (sp [1]->opcode == OP_I8CONST) {
7205 #if SIZEOF_REGISTER == 8
7206 ins->inst_imm = sp [1]->inst_l;
7208 ins->inst_ls_word = sp [1]->inst_ls_word;
7209 ins->inst_ms_word = sp [1]->inst_ms_word;
7213 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7216 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7217 if (sp [1]->next == NULL)
7218 sp [1]->opcode = OP_NOP;
7221 MONO_ADD_INS ((cfg)->cbb, (ins));
7223 *sp++ = mono_decompose_opcode (cfg, ins);
7236 case CEE_CONV_OVF_I8:
7237 case CEE_CONV_OVF_U8:
7241 /* Special case this earlier so we have long constants in the IR */
7242 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7243 int data = sp [-1]->inst_c0;
7244 sp [-1]->opcode = OP_I8CONST;
7245 sp [-1]->type = STACK_I8;
7246 #if SIZEOF_REGISTER == 8
7247 if ((*ip) == CEE_CONV_U8)
7248 sp [-1]->inst_c0 = (guint32)data;
7250 sp [-1]->inst_c0 = data;
7252 sp [-1]->inst_ls_word = data;
7253 if ((*ip) == CEE_CONV_U8)
7254 sp [-1]->inst_ms_word = 0;
7256 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7258 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7265 case CEE_CONV_OVF_I4:
7266 case CEE_CONV_OVF_I1:
7267 case CEE_CONV_OVF_I2:
7268 case CEE_CONV_OVF_I:
7269 case CEE_CONV_OVF_U:
7272 if (sp [-1]->type == STACK_R8) {
7273 ADD_UNOP (CEE_CONV_OVF_I8);
7280 case CEE_CONV_OVF_U1:
7281 case CEE_CONV_OVF_U2:
7282 case CEE_CONV_OVF_U4:
7285 if (sp [-1]->type == STACK_R8) {
7286 ADD_UNOP (CEE_CONV_OVF_U8);
7293 case CEE_CONV_OVF_I1_UN:
7294 case CEE_CONV_OVF_I2_UN:
7295 case CEE_CONV_OVF_I4_UN:
7296 case CEE_CONV_OVF_I8_UN:
7297 case CEE_CONV_OVF_U1_UN:
7298 case CEE_CONV_OVF_U2_UN:
7299 case CEE_CONV_OVF_U4_UN:
7300 case CEE_CONV_OVF_U8_UN:
7301 case CEE_CONV_OVF_I_UN:
7302 case CEE_CONV_OVF_U_UN:
7309 CHECK_CFG_EXCEPTION;
7313 case CEE_ADD_OVF_UN:
7315 case CEE_MUL_OVF_UN:
7317 case CEE_SUB_OVF_UN:
7325 token = read32 (ip + 1);
7326 klass = mini_get_class (method, token, generic_context);
7327 CHECK_TYPELOAD (klass);
7329 if (generic_class_is_reference_type (cfg, klass)) {
7330 MonoInst *store, *load;
7331 int dreg = alloc_preg (cfg);
7333 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7334 load->flags |= ins_flag;
7335 MONO_ADD_INS (cfg->cbb, load);
7337 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7338 store->flags |= ins_flag;
7339 MONO_ADD_INS (cfg->cbb, store);
7341 #if HAVE_WRITE_BARRIERS
7342 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7343 MonoInst *dummy_use;
7344 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7345 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7346 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7350 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7362 token = read32 (ip + 1);
7363 klass = mini_get_class (method, token, generic_context);
7364 CHECK_TYPELOAD (klass);
7366 /* Optimize the common ldobj+stloc combination */
7376 loc_index = ip [5] - CEE_STLOC_0;
7383 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7384 CHECK_LOCAL (loc_index);
7386 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7387 ins->dreg = cfg->locals [loc_index]->dreg;
7393 /* Optimize the ldobj+stobj combination */
7394 /* The reference case ends up being a load+store anyway */
7395 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7400 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7407 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7416 CHECK_STACK_OVF (1);
7418 n = read32 (ip + 1);
7420 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7421 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7422 ins->type = STACK_OBJ;
7425 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7426 MonoInst *iargs [1];
7428 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7429 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7431 if (cfg->opt & MONO_OPT_SHARED) {
7432 MonoInst *iargs [3];
7434 if (cfg->compile_aot) {
7435 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7437 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7438 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7439 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7440 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7441 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7443 if (bblock->out_of_line) {
7444 MonoInst *iargs [2];
7446 if (image == mono_defaults.corlib) {
7448 * Avoid relocations in AOT and save some space by using a
7449 * version of helper_ldstr specialized to mscorlib.
7451 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7452 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7454 /* Avoid creating the string object */
7455 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7456 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7457 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7461 if (cfg->compile_aot) {
7462 NEW_LDSTRCONST (cfg, ins, image, n);
7464 MONO_ADD_INS (bblock, ins);
7467 NEW_PCONST (cfg, ins, NULL);
7468 ins->type = STACK_OBJ;
7469 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7471 MONO_ADD_INS (bblock, ins);
7480 MonoInst *iargs [2];
7481 MonoMethodSignature *fsig;
7484 MonoInst *vtable_arg = NULL;
7487 token = read32 (ip + 1);
7488 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7491 fsig = mono_method_get_signature (cmethod, image, token);
7495 mono_save_token_info (cfg, image, token, cmethod);
7497 if (!mono_class_init (cmethod->klass))
7500 if (cfg->generic_sharing_context)
7501 context_used = mono_method_check_context_used (cmethod);
7503 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7504 if (check_linkdemand (cfg, method, cmethod))
7506 CHECK_CFG_EXCEPTION;
7507 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7508 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7511 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7512 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7513 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7514 mono_class_vtable (cfg->domain, cmethod->klass);
7515 CHECK_TYPELOAD (cmethod->klass);
7517 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7518 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7521 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7522 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7524 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7526 CHECK_TYPELOAD (cmethod->klass);
7527 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7532 n = fsig->param_count;
7536 * Generate smaller code for the common newobj <exception> instruction in
7537 * argument checking code.
7539 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7540 is_exception_class (cmethod->klass) && n <= 2 &&
7541 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7542 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7543 MonoInst *iargs [3];
7545 g_assert (!vtable_arg);
7549 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7552 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7556 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7561 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7564 g_assert_not_reached ();
7572 /* move the args to allow room for 'this' in the first position */
7578 /* check_call_signature () requires sp[0] to be set */
7579 this_ins.type = STACK_OBJ;
7581 if (check_call_signature (cfg, fsig, sp))
7586 if (mini_class_is_system_array (cmethod->klass)) {
7587 g_assert (!vtable_arg);
7589 *sp = emit_get_rgctx_method (cfg, context_used,
7590 cmethod, MONO_RGCTX_INFO_METHOD);
7592 /* Avoid varargs in the common case */
7593 if (fsig->param_count == 1)
7594 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7595 else if (fsig->param_count == 2)
7596 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7597 else if (fsig->param_count == 3)
7598 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7600 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7601 } else if (cmethod->string_ctor) {
7602 g_assert (!context_used);
7603 g_assert (!vtable_arg);
7604 /* we simply pass a null pointer */
7605 EMIT_NEW_PCONST (cfg, *sp, NULL);
7606 /* now call the string ctor */
7607 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7609 MonoInst* callvirt_this_arg = NULL;
7611 if (cmethod->klass->valuetype) {
7612 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7613 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7614 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7619 * The code generated by mini_emit_virtual_call () expects
7620 * iargs [0] to be a boxed instance, but luckily the vcall
7621 * will be transformed into a normal call there.
7623 } else if (context_used) {
7624 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7627 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7629 CHECK_TYPELOAD (cmethod->klass);
7632 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7633 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7634 * As a workaround, we call class cctors before allocating objects.
7636 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7637 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7638 if (cfg->verbose_level > 2)
7639 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7640 class_inits = g_slist_prepend (class_inits, vtable);
7643 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7646 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7649 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7651 /* Now call the actual ctor */
7652 /* Avoid virtual calls to ctors if possible */
7653 if (cmethod->klass->marshalbyref)
7654 callvirt_this_arg = sp [0];
7657 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7658 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7659 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7664 CHECK_CFG_EXCEPTION;
7669 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7670 mono_method_check_inlining (cfg, cmethod) &&
7671 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7672 !g_list_find (dont_inline, cmethod)) {
7675 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7676 cfg->real_offset += 5;
7679 inline_costs += costs - 5;
7682 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7684 } else if (context_used &&
7685 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7686 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7687 MonoInst *cmethod_addr;
7689 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7690 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7692 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7695 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7696 callvirt_this_arg, NULL, vtable_arg);
7700 if (alloc == NULL) {
7702 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7703 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7717 token = read32 (ip + 1);
7718 klass = mini_get_class (method, token, generic_context);
7719 CHECK_TYPELOAD (klass);
7720 if (sp [0]->type != STACK_OBJ)
7723 if (cfg->generic_sharing_context)
7724 context_used = mono_class_check_context_used (klass);
7726 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7733 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7735 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7739 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7740 MonoMethod *mono_castclass;
7741 MonoInst *iargs [1];
7744 mono_castclass = mono_marshal_get_castclass (klass);
7747 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7748 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7749 g_assert (costs > 0);
7752 cfg->real_offset += 5;
7757 inline_costs += costs;
7760 ins = handle_castclass (cfg, klass, *sp, context_used);
7761 CHECK_CFG_EXCEPTION;
7771 token = read32 (ip + 1);
7772 klass = mini_get_class (method, token, generic_context);
7773 CHECK_TYPELOAD (klass);
7774 if (sp [0]->type != STACK_OBJ)
7777 if (cfg->generic_sharing_context)
7778 context_used = mono_class_check_context_used (klass);
7780 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7787 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7789 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7793 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7794 MonoMethod *mono_isinst;
7795 MonoInst *iargs [1];
7798 mono_isinst = mono_marshal_get_isinst (klass);
7801 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7802 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7803 g_assert (costs > 0);
7806 cfg->real_offset += 5;
7811 inline_costs += costs;
7814 ins = handle_isinst (cfg, klass, *sp, context_used);
7815 CHECK_CFG_EXCEPTION;
7822 case CEE_UNBOX_ANY: {
7826 token = read32 (ip + 1);
7827 klass = mini_get_class (method, token, generic_context);
7828 CHECK_TYPELOAD (klass);
7830 mono_save_token_info (cfg, image, token, klass);
7832 if (cfg->generic_sharing_context)
7833 context_used = mono_class_check_context_used (klass);
7835 if (generic_class_is_reference_type (cfg, klass)) {
7836 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7838 MonoInst *iargs [2];
7843 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7844 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7848 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7849 MonoMethod *mono_castclass;
7850 MonoInst *iargs [1];
7853 mono_castclass = mono_marshal_get_castclass (klass);
7856 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7857 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7859 g_assert (costs > 0);
7862 cfg->real_offset += 5;
7866 inline_costs += costs;
7868 ins = handle_castclass (cfg, klass, *sp, 0);
7869 CHECK_CFG_EXCEPTION;
7877 if (mono_class_is_nullable (klass)) {
7878 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7885 ins = handle_unbox (cfg, klass, sp, context_used);
7891 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7904 token = read32 (ip + 1);
7905 klass = mini_get_class (method, token, generic_context);
7906 CHECK_TYPELOAD (klass);
7908 mono_save_token_info (cfg, image, token, klass);
7910 if (cfg->generic_sharing_context)
7911 context_used = mono_class_check_context_used (klass);
7913 if (generic_class_is_reference_type (cfg, klass)) {
7919 if (klass == mono_defaults.void_class)
7921 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7923 /* frequent check in generic code: box (struct), brtrue */
7924 if (!mono_class_is_nullable (klass) &&
7925 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7926 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7928 MONO_INST_NEW (cfg, ins, OP_BR);
7929 if (*ip == CEE_BRTRUE_S) {
7932 target = ip + 1 + (signed char)(*ip);
7937 target = ip + 4 + (gint)(read32 (ip));
7940 GET_BBLOCK (cfg, tblock, target);
7941 link_bblock (cfg, bblock, tblock);
7942 ins->inst_target_bb = tblock;
7943 GET_BBLOCK (cfg, tblock, ip);
7945 * This leads to some inconsistency, since the two bblocks are
7946 * not really connected, but it is needed for handling stack
7947 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7948 * FIXME: This should only be needed if sp != stack_start, but that
7949 * doesn't work for some reason (test failure in mcs/tests on x86).
7951 link_bblock (cfg, bblock, tblock);
7952 if (sp != stack_start) {
7953 handle_stack_args (cfg, stack_start, sp - stack_start);
7955 CHECK_UNVERIFIABLE (cfg);
7957 MONO_ADD_INS (bblock, ins);
7958 start_new_bblock = 1;
7962 *sp++ = handle_box (cfg, val, klass, context_used);
7964 CHECK_CFG_EXCEPTION;
7973 token = read32 (ip + 1);
7974 klass = mini_get_class (method, token, generic_context);
7975 CHECK_TYPELOAD (klass);
7977 mono_save_token_info (cfg, image, token, klass);
7979 if (cfg->generic_sharing_context)
7980 context_used = mono_class_check_context_used (klass);
7982 if (mono_class_is_nullable (klass)) {
7985 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7986 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7990 ins = handle_unbox (cfg, klass, sp, context_used);
8000 MonoClassField *field;
8004 if (*ip == CEE_STFLD) {
8011 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8013 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8016 token = read32 (ip + 1);
8017 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8018 field = mono_method_get_wrapper_data (method, token);
8019 klass = field->parent;
8022 field = mono_field_from_token (image, token, &klass, generic_context);
8026 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8027 FIELD_ACCESS_FAILURE;
8028 mono_class_init (klass);
8030 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8031 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8032 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8033 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8036 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8037 if (*ip == CEE_STFLD) {
8038 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8040 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8041 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8042 MonoInst *iargs [5];
8045 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8046 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8047 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8051 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8052 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8053 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8054 g_assert (costs > 0);
8056 cfg->real_offset += 5;
8059 inline_costs += costs;
8061 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8066 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8068 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8069 if (sp [0]->opcode != OP_LDADDR)
8070 store->flags |= MONO_INST_FAULT;
8072 #if HAVE_WRITE_BARRIERS
8073 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8074 /* insert call to write barrier */
8075 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8076 MonoInst *iargs [2], *dummy_use;
8079 dreg = alloc_preg (cfg);
8080 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8082 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8084 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8088 store->flags |= ins_flag;
8095 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8096 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8097 MonoInst *iargs [4];
8100 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8101 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8102 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8103 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8104 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8105 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8107 g_assert (costs > 0);
8109 cfg->real_offset += 5;
8113 inline_costs += costs;
8115 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8119 if (sp [0]->type == STACK_VTYPE) {
8122 /* Have to compute the address of the variable */
8124 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8126 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8128 g_assert (var->klass == klass);
8130 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8134 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8136 if (*ip == CEE_LDFLDA) {
8137 dreg = alloc_preg (cfg);
8139 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8140 ins->klass = mono_class_from_mono_type (field->type);
8141 ins->type = STACK_MP;
8146 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8147 load->flags |= ins_flag;
8148 if (sp [0]->opcode != OP_LDADDR)
8149 load->flags |= MONO_INST_FAULT;
8160 MonoClassField *field;
8161 gpointer addr = NULL;
8162 gboolean is_special_static;
8165 token = read32 (ip + 1);
8167 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8168 field = mono_method_get_wrapper_data (method, token);
8169 klass = field->parent;
8172 field = mono_field_from_token (image, token, &klass, generic_context);
8175 mono_class_init (klass);
8176 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8177 FIELD_ACCESS_FAILURE;
8179 /* if the class is Critical then transparent code cannot access it's fields */
8180 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8181 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8184 * We can only support shared generic static
8185 * field access on architectures where the
8186 * trampoline code has been extended to handle
8187 * the generic class init.
8189 #ifndef MONO_ARCH_VTABLE_REG
8190 GENERIC_SHARING_FAILURE (*ip);
8193 if (cfg->generic_sharing_context)
8194 context_used = mono_class_check_context_used (klass);
8196 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8198 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8199 * to be called here.
8201 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8202 mono_class_vtable (cfg->domain, klass);
8203 CHECK_TYPELOAD (klass);
8205 mono_domain_lock (cfg->domain);
8206 if (cfg->domain->special_static_fields)
8207 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8208 mono_domain_unlock (cfg->domain);
8210 is_special_static = mono_class_field_is_special_static (field);
8212 /* Generate IR to compute the field address */
8213 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8215 * Fast access to TLS data
8216 * Inline version of get_thread_static_data () in
8220 int idx, static_data_reg, array_reg, dreg;
8221 MonoInst *thread_ins;
8223 // offset &= 0x7fffffff;
8224 // idx = (offset >> 24) - 1;
8225 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8227 thread_ins = mono_get_thread_intrinsic (cfg);
8228 MONO_ADD_INS (cfg->cbb, thread_ins);
8229 static_data_reg = alloc_ireg (cfg);
8230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8232 if (cfg->compile_aot) {
8233 int offset_reg, offset2_reg, idx_reg;
8235 /* For TLS variables, this will return the TLS offset */
8236 EMIT_NEW_SFLDACONST (cfg, ins, field);
8237 offset_reg = ins->dreg;
8238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8239 idx_reg = alloc_ireg (cfg);
8240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8243 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8244 array_reg = alloc_ireg (cfg);
8245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8246 offset2_reg = alloc_ireg (cfg);
8247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8248 dreg = alloc_ireg (cfg);
8249 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8251 offset = (gsize)addr & 0x7fffffff;
8252 idx = (offset >> 24) - 1;
8254 array_reg = alloc_ireg (cfg);
8255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8256 dreg = alloc_ireg (cfg);
8257 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8259 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8260 (cfg->compile_aot && is_special_static) ||
8261 (context_used && is_special_static)) {
8262 MonoInst *iargs [2];
8264 g_assert (field->parent);
8265 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8267 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8268 field, MONO_RGCTX_INFO_CLASS_FIELD);
8270 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8272 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8273 } else if (context_used) {
8274 MonoInst *static_data;
8277 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8278 method->klass->name_space, method->klass->name, method->name,
8279 depth, field->offset);
8282 if (mono_class_needs_cctor_run (klass, method))
8283 emit_generic_class_init (cfg, klass);
8286 * The pointer we're computing here is
8288 * super_info.static_data + field->offset
8290 static_data = emit_get_rgctx_klass (cfg, context_used,
8291 klass, MONO_RGCTX_INFO_STATIC_DATA);
8293 if (field->offset == 0) {
8296 int addr_reg = mono_alloc_preg (cfg);
8297 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8299 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8300 MonoInst *iargs [2];
8302 g_assert (field->parent);
8303 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8304 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8305 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8307 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8309 CHECK_TYPELOAD (klass);
8311 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8312 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8313 if (cfg->verbose_level > 2)
8314 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8315 class_inits = g_slist_prepend (class_inits, vtable);
8317 if (cfg->run_cctors) {
8319 /* This makes it so that inlining cannot trigger */
8320 /* .cctors: too many apps depend on them */
8321 /* running with a specific order... */
8322 if (! vtable->initialized)
8324 ex = mono_runtime_class_init_full (vtable, FALSE);
8326 set_exception_object (cfg, ex);
8327 goto exception_exit;
8331 addr = (char*)vtable->data + field->offset;
8333 if (cfg->compile_aot)
8334 EMIT_NEW_SFLDACONST (cfg, ins, field);
8336 EMIT_NEW_PCONST (cfg, ins, addr);
8338 MonoInst *iargs [1];
8339 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8340 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8344 /* Generate IR to do the actual load/store operation */
8346 if (*ip == CEE_LDSFLDA) {
8347 ins->klass = mono_class_from_mono_type (field->type);
8348 ins->type = STACK_PTR;
8350 } else if (*ip == CEE_STSFLD) {
8355 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8356 store->flags |= ins_flag;
8358 gboolean is_const = FALSE;
8359 MonoVTable *vtable = NULL;
8361 if (!context_used) {
8362 vtable = mono_class_vtable (cfg->domain, klass);
8363 CHECK_TYPELOAD (klass);
8365 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8366 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8367 gpointer addr = (char*)vtable->data + field->offset;
8368 int ro_type = field->type->type;
8369 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8370 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8372 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8375 case MONO_TYPE_BOOLEAN:
8377 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8381 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8384 case MONO_TYPE_CHAR:
8386 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8390 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8395 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8399 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8405 case MONO_TYPE_FNPTR:
8406 #ifndef HAVE_MOVING_COLLECTOR
8407 case MONO_TYPE_STRING:
8408 case MONO_TYPE_OBJECT:
8409 case MONO_TYPE_CLASS:
8410 case MONO_TYPE_SZARRAY:
8411 case MONO_TYPE_ARRAY:
8413 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8414 type_to_eval_stack_type ((cfg), field->type, *sp);
8419 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8424 case MONO_TYPE_VALUETYPE:
8434 CHECK_STACK_OVF (1);
8436 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8437 load->flags |= ins_flag;
8450 token = read32 (ip + 1);
8451 klass = mini_get_class (method, token, generic_context);
8452 CHECK_TYPELOAD (klass);
8453 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8454 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8455 #if HAVE_WRITE_BARRIERS
8456 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8457 generic_class_is_reference_type (cfg, klass)) {
8458 MonoInst *dummy_use;
8459 /* insert call to write barrier */
8460 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8461 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8462 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8475 const char *data_ptr;
8477 guint32 field_token;
8483 token = read32 (ip + 1);
8485 klass = mini_get_class (method, token, generic_context);
8486 CHECK_TYPELOAD (klass);
8488 if (cfg->generic_sharing_context)
8489 context_used = mono_class_check_context_used (klass);
8491 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8492 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8493 ins->sreg1 = sp [0]->dreg;
8494 ins->type = STACK_I4;
8495 ins->dreg = alloc_ireg (cfg);
8496 MONO_ADD_INS (cfg->cbb, ins);
8497 *sp = mono_decompose_opcode (cfg, ins);
8502 MonoClass *array_class = mono_array_class_get (klass, 1);
8503 /* FIXME: we cannot get a managed
8504 allocator because we can't get the
8505 open generic class's vtable. We
8506 have the same problem in
8507 handle_alloc(). This
8508 needs to be solved so that we can
8509 have managed allocs of shared
8512 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8513 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8515 MonoMethod *managed_alloc = NULL;
8517 /* FIXME: Decompose later to help abcrem */
8520 args [0] = emit_get_rgctx_klass (cfg, context_used,
8521 array_class, MONO_RGCTX_INFO_VTABLE);
8526 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8528 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8530 if (cfg->opt & MONO_OPT_SHARED) {
8531 /* Decompose now to avoid problems with references to the domainvar */
8532 MonoInst *iargs [3];
8534 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8535 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8538 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8540 /* Decompose later since it is needed by abcrem */
8541 MonoClass *array_type = mono_array_class_get (klass, 1);
8542 mono_class_vtable (cfg->domain, array_type);
8543 CHECK_TYPELOAD (array_type);
8545 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8546 ins->dreg = alloc_preg (cfg);
8547 ins->sreg1 = sp [0]->dreg;
8548 ins->inst_newa_class = klass;
8549 ins->type = STACK_OBJ;
8551 MONO_ADD_INS (cfg->cbb, ins);
8552 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8553 cfg->cbb->has_array_access = TRUE;
8555 /* Needed so mono_emit_load_get_addr () gets called */
8556 mono_get_got_var (cfg);
8566 * we inline/optimize the initialization sequence if possible.
8567 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8568 * for small sizes open code the memcpy
8569 * ensure the rva field is big enough
8571 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8572 MonoMethod *memcpy_method = get_memcpy_method ();
8573 MonoInst *iargs [3];
8574 int add_reg = alloc_preg (cfg);
8576 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8577 if (cfg->compile_aot) {
8578 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8580 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8582 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8583 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8592 if (sp [0]->type != STACK_OBJ)
8595 dreg = alloc_preg (cfg);
8596 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8597 ins->dreg = alloc_preg (cfg);
8598 ins->sreg1 = sp [0]->dreg;
8599 ins->type = STACK_I4;
8600 MONO_ADD_INS (cfg->cbb, ins);
8601 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8602 cfg->cbb->has_array_access = TRUE;
8610 if (sp [0]->type != STACK_OBJ)
8613 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8615 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8616 CHECK_TYPELOAD (klass);
8617 /* we need to make sure that this array is exactly the type it needs
8618 * to be for correctness. the wrappers are lax with their usage
8619 * so we need to ignore them here
8621 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8622 MonoClass *array_class = mono_array_class_get (klass, 1);
8623 mini_emit_check_array_type (cfg, sp [0], array_class);
8624 CHECK_TYPELOAD (array_class);
8628 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8643 case CEE_LDELEM_REF: {
8649 if (*ip == CEE_LDELEM) {
8651 token = read32 (ip + 1);
8652 klass = mini_get_class (method, token, generic_context);
8653 CHECK_TYPELOAD (klass);
8654 mono_class_init (klass);
8657 klass = array_access_to_klass (*ip);
8659 if (sp [0]->type != STACK_OBJ)
8662 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8664 if (sp [1]->opcode == OP_ICONST) {
8665 int array_reg = sp [0]->dreg;
8666 int index_reg = sp [1]->dreg;
8667 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8669 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8670 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8672 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8673 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8676 if (*ip == CEE_LDELEM)
8689 case CEE_STELEM_REF:
8696 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8698 if (*ip == CEE_STELEM) {
8700 token = read32 (ip + 1);
8701 klass = mini_get_class (method, token, generic_context);
8702 CHECK_TYPELOAD (klass);
8703 mono_class_init (klass);
8706 klass = array_access_to_klass (*ip);
8708 if (sp [0]->type != STACK_OBJ)
8711 /* storing a NULL doesn't need any of the complex checks in stelemref */
8712 if (generic_class_is_reference_type (cfg, klass) &&
8713 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8714 MonoMethod* helper = mono_marshal_get_stelemref ();
8715 MonoInst *iargs [3];
8717 if (sp [0]->type != STACK_OBJ)
8719 if (sp [2]->type != STACK_OBJ)
8726 mono_emit_method_call (cfg, helper, iargs, NULL);
8728 if (sp [1]->opcode == OP_ICONST) {
8729 int array_reg = sp [0]->dreg;
8730 int index_reg = sp [1]->dreg;
8731 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8733 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8734 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8736 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8737 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8741 if (*ip == CEE_STELEM)
8748 case CEE_CKFINITE: {
8752 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8753 ins->sreg1 = sp [0]->dreg;
8754 ins->dreg = alloc_freg (cfg);
8755 ins->type = STACK_R8;
8756 MONO_ADD_INS (bblock, ins);
8758 *sp++ = mono_decompose_opcode (cfg, ins);
8763 case CEE_REFANYVAL: {
8764 MonoInst *src_var, *src;
8766 int klass_reg = alloc_preg (cfg);
8767 int dreg = alloc_preg (cfg);
8770 MONO_INST_NEW (cfg, ins, *ip);
8773 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8774 CHECK_TYPELOAD (klass);
8775 mono_class_init (klass);
8777 if (cfg->generic_sharing_context)
8778 context_used = mono_class_check_context_used (klass);
8781 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8783 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8784 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8788 MonoInst *klass_ins;
8790 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8791 klass, MONO_RGCTX_INFO_KLASS);
8794 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8795 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8797 mini_emit_class_check (cfg, klass_reg, klass);
8799 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8800 ins->type = STACK_MP;
8805 case CEE_MKREFANY: {
8806 MonoInst *loc, *addr;
8809 MONO_INST_NEW (cfg, ins, *ip);
8812 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8813 CHECK_TYPELOAD (klass);
8814 mono_class_init (klass);
8816 if (cfg->generic_sharing_context)
8817 context_used = mono_class_check_context_used (klass);
8819 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8820 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8823 MonoInst *const_ins;
8824 int type_reg = alloc_preg (cfg);
8826 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8828 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8830 } else if (cfg->compile_aot) {
8831 int const_reg = alloc_preg (cfg);
8832 int type_reg = alloc_preg (cfg);
8834 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8835 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8840 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8842 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8844 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8845 ins->type = STACK_VTYPE;
8846 ins->klass = mono_defaults.typed_reference_class;
8853 MonoClass *handle_class;
8855 CHECK_STACK_OVF (1);
8858 n = read32 (ip + 1);
8860 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8861 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8862 handle = mono_method_get_wrapper_data (method, n);
8863 handle_class = mono_method_get_wrapper_data (method, n + 1);
8864 if (handle_class == mono_defaults.typehandle_class)
8865 handle = &((MonoClass*)handle)->byval_arg;
8868 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8872 mono_class_init (handle_class);
8873 if (cfg->generic_sharing_context) {
8874 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8875 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8876 /* This case handles ldtoken
8877 of an open type, like for
8880 } else if (handle_class == mono_defaults.typehandle_class) {
8881 /* If we get a MONO_TYPE_CLASS
8882 then we need to provide the
8884 instantiation of it. */
8885 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8888 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8889 } else if (handle_class == mono_defaults.fieldhandle_class)
8890 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8891 else if (handle_class == mono_defaults.methodhandle_class)
8892 context_used = mono_method_check_context_used (handle);
8894 g_assert_not_reached ();
8897 if ((cfg->opt & MONO_OPT_SHARED) &&
8898 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8899 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8900 MonoInst *addr, *vtvar, *iargs [3];
8901 int method_context_used;
8903 if (cfg->generic_sharing_context)
8904 method_context_used = mono_method_check_context_used (method);
8906 method_context_used = 0;
8908 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8910 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8911 EMIT_NEW_ICONST (cfg, iargs [1], n);
8912 if (method_context_used) {
8913 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8914 method, MONO_RGCTX_INFO_METHOD);
8915 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8917 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8918 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8920 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8924 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8926 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8927 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8928 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8929 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8930 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8931 MonoClass *tclass = mono_class_from_mono_type (handle);
8933 mono_class_init (tclass);
8935 ins = emit_get_rgctx_klass (cfg, context_used,
8936 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8937 } else if (cfg->compile_aot) {
8938 if (method->wrapper_type) {
8939 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8940 /* Special case for static synchronized wrappers */
8941 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8943 /* FIXME: n is not a normal token */
8944 cfg->disable_aot = TRUE;
8945 EMIT_NEW_PCONST (cfg, ins, NULL);
8948 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8951 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8953 ins->type = STACK_OBJ;
8954 ins->klass = cmethod->klass;
8957 MonoInst *addr, *vtvar;
8959 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8962 if (handle_class == mono_defaults.typehandle_class) {
8963 ins = emit_get_rgctx_klass (cfg, context_used,
8964 mono_class_from_mono_type (handle),
8965 MONO_RGCTX_INFO_TYPE);
8966 } else if (handle_class == mono_defaults.methodhandle_class) {
8967 ins = emit_get_rgctx_method (cfg, context_used,
8968 handle, MONO_RGCTX_INFO_METHOD);
8969 } else if (handle_class == mono_defaults.fieldhandle_class) {
8970 ins = emit_get_rgctx_field (cfg, context_used,
8971 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8973 g_assert_not_reached ();
8975 } else if (cfg->compile_aot) {
8976 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8978 EMIT_NEW_PCONST (cfg, ins, handle);
8980 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8981 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8982 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8992 MONO_INST_NEW (cfg, ins, OP_THROW);
8994 ins->sreg1 = sp [0]->dreg;
8996 bblock->out_of_line = TRUE;
8997 MONO_ADD_INS (bblock, ins);
8998 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8999 MONO_ADD_INS (bblock, ins);
9002 link_bblock (cfg, bblock, end_bblock);
9003 start_new_bblock = 1;
9005 case CEE_ENDFINALLY:
9006 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9007 MONO_ADD_INS (bblock, ins);
9009 start_new_bblock = 1;
9012 * Control will leave the method so empty the stack, otherwise
9013 * the next basic block will start with a nonempty stack.
9015 while (sp != stack_start) {
9023 if (*ip == CEE_LEAVE) {
9025 target = ip + 5 + (gint32)read32(ip + 1);
9028 target = ip + 2 + (signed char)(ip [1]);
9031 /* empty the stack */
9032 while (sp != stack_start) {
9037 * If this leave statement is in a catch block, check for a
9038 * pending exception, and rethrow it if necessary.
9039 * We avoid doing this in runtime invoke wrappers, since those are called
9040 * by native code which expects the wrapper to catch all exceptions.
9042 for (i = 0; i < header->num_clauses; ++i) {
9043 MonoExceptionClause *clause = &header->clauses [i];
9046 * Use <= in the final comparison to handle clauses with multiple
9047 * leave statements, like in bug #78024.
9048 * The ordering of the exception clauses guarantees that we find the
9051 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9053 MonoBasicBlock *dont_throw;
9058 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9061 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9063 NEW_BBLOCK (cfg, dont_throw);
9067 * Currently, we always rethrow the abort exception, despite the
9067 * fact that this is not correct. See thread6.cs for an example.
9068 * But propagating the abort exception is more important than
9069 * getting the semantics right.
9071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9073 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9075 MONO_START_BB (cfg, dont_throw);
9080 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9082 MonoExceptionClause *clause;
9084 for (tmp = handlers; tmp; tmp = tmp->next) {
9086 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9088 link_bblock (cfg, bblock, tblock);
9089 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9090 ins->inst_target_bb = tblock;
9091 ins->inst_eh_block = clause;
9092 MONO_ADD_INS (bblock, ins);
9093 bblock->has_call_handler = 1;
9094 if (COMPILE_LLVM (cfg)) {
9095 MonoBasicBlock *target_bb;
9098 * Link the finally bblock with the target, since it will
9099 * conceptually branch there.
9100 * FIXME: Have to link the bblock containing the endfinally.
9102 GET_BBLOCK (cfg, target_bb, target);
9103 link_bblock (cfg, tblock, target_bb);
9106 g_list_free (handlers);
9109 MONO_INST_NEW (cfg, ins, OP_BR);
9110 MONO_ADD_INS (bblock, ins);
9111 GET_BBLOCK (cfg, tblock, target);
9112 link_bblock (cfg, bblock, tblock);
9113 ins->inst_target_bb = tblock;
9114 start_new_bblock = 1;
9116 if (*ip == CEE_LEAVE)
9125 * Mono specific opcodes
9127 case MONO_CUSTOM_PREFIX: {
9129 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9133 case CEE_MONO_ICALL: {
9135 MonoJitICallInfo *info;
9137 token = read32 (ip + 2);
9138 func = mono_method_get_wrapper_data (method, token);
9139 info = mono_find_jit_icall_by_addr (func);
9142 CHECK_STACK (info->sig->param_count);
9143 sp -= info->sig->param_count;
9145 ins = mono_emit_jit_icall (cfg, info->func, sp);
9146 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9150 inline_costs += 10 * num_calls++;
9154 case CEE_MONO_LDPTR: {
9157 CHECK_STACK_OVF (1);
9159 token = read32 (ip + 2);
9161 ptr = mono_method_get_wrapper_data (method, token);
9162 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9163 MonoJitICallInfo *callinfo;
9164 const char *icall_name;
9166 icall_name = method->name + strlen ("__icall_wrapper_");
9167 g_assert (icall_name);
9168 callinfo = mono_find_jit_icall_by_name (icall_name);
9169 g_assert (callinfo);
9171 if (ptr == callinfo->func) {
9172 /* Will be transformed into an AOTCONST later */
9173 EMIT_NEW_PCONST (cfg, ins, ptr);
9179 /* FIXME: Generalize this */
9180 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9181 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9186 EMIT_NEW_PCONST (cfg, ins, ptr);
9189 inline_costs += 10 * num_calls++;
9190 /* Can't embed random pointers into AOT code */
9191 cfg->disable_aot = 1;
9194 case CEE_MONO_ICALL_ADDR: {
9195 MonoMethod *cmethod;
9198 CHECK_STACK_OVF (1);
9200 token = read32 (ip + 2);
9202 cmethod = mono_method_get_wrapper_data (method, token);
9204 if (cfg->compile_aot) {
9205 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9207 ptr = mono_lookup_internal_call (cmethod);
9209 EMIT_NEW_PCONST (cfg, ins, ptr);
9215 case CEE_MONO_VTADDR: {
9216 MonoInst *src_var, *src;
9222 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9223 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9228 case CEE_MONO_NEWOBJ: {
9229 MonoInst *iargs [2];
9231 CHECK_STACK_OVF (1);
9233 token = read32 (ip + 2);
9234 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9235 mono_class_init (klass);
9236 NEW_DOMAINCONST (cfg, iargs [0]);
9237 MONO_ADD_INS (cfg->cbb, iargs [0]);
9238 NEW_CLASSCONST (cfg, iargs [1], klass);
9239 MONO_ADD_INS (cfg->cbb, iargs [1]);
9240 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9242 inline_costs += 10 * num_calls++;
9245 case CEE_MONO_OBJADDR:
9248 MONO_INST_NEW (cfg, ins, OP_MOVE);
9249 ins->dreg = alloc_preg (cfg);
9250 ins->sreg1 = sp [0]->dreg;
9251 ins->type = STACK_MP;
9252 MONO_ADD_INS (cfg->cbb, ins);
9256 case CEE_MONO_LDNATIVEOBJ:
9258 * Similar to LDOBJ, but instead load the unmanaged
9259 * representation of the vtype to the stack.
9264 token = read32 (ip + 2);
9265 klass = mono_method_get_wrapper_data (method, token);
9266 g_assert (klass->valuetype);
9267 mono_class_init (klass);
9270 MonoInst *src, *dest, *temp;
9273 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9274 temp->backend.is_pinvoke = 1;
9275 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9276 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9278 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9279 dest->type = STACK_VTYPE;
9280 dest->klass = klass;
9286 case CEE_MONO_RETOBJ: {
9288 * Same as RET, but return the native representation of a vtype
9291 g_assert (cfg->ret);
9292 g_assert (mono_method_signature (method)->pinvoke);
9297 token = read32 (ip + 2);
9298 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9300 if (!cfg->vret_addr) {
9301 g_assert (cfg->ret_var_is_local);
9303 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9305 EMIT_NEW_RETLOADA (cfg, ins);
9307 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9309 if (sp != stack_start)
9312 MONO_INST_NEW (cfg, ins, OP_BR);
9313 ins->inst_target_bb = end_bblock;
9314 MONO_ADD_INS (bblock, ins);
9315 link_bblock (cfg, bblock, end_bblock);
9316 start_new_bblock = 1;
9320 case CEE_MONO_CISINST:
9321 case CEE_MONO_CCASTCLASS: {
9326 token = read32 (ip + 2);
9327 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9328 if (ip [1] == CEE_MONO_CISINST)
9329 ins = handle_cisinst (cfg, klass, sp [0]);
9331 ins = handle_ccastclass (cfg, klass, sp [0]);
9337 case CEE_MONO_SAVE_LMF:
9338 case CEE_MONO_RESTORE_LMF:
9339 #ifdef MONO_ARCH_HAVE_LMF_OPS
9340 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9341 MONO_ADD_INS (bblock, ins);
9342 cfg->need_lmf_area = TRUE;
9346 case CEE_MONO_CLASSCONST:
9347 CHECK_STACK_OVF (1);
9349 token = read32 (ip + 2);
9350 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9353 inline_costs += 10 * num_calls++;
9355 case CEE_MONO_NOT_TAKEN:
9356 bblock->out_of_line = TRUE;
9360 CHECK_STACK_OVF (1);
9362 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9363 ins->dreg = alloc_preg (cfg);
9364 ins->inst_offset = (gint32)read32 (ip + 2);
9365 ins->type = STACK_PTR;
9366 MONO_ADD_INS (bblock, ins);
9370 case CEE_MONO_DYN_CALL: {
9373 /* It would be easier to call a trampoline, but that would put an
9374 * extra frame on the stack, confusing exception handling. So
9375 * implement it inline using an opcode for now.
9378 if (!cfg->dyn_call_var) {
9379 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9380 /* prevent it from being register allocated */
9381 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9384 /* Has to use a call inst since local regalloc expects it */
9385 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9386 ins = (MonoInst*)call;
9388 ins->sreg1 = sp [0]->dreg;
9389 ins->sreg2 = sp [1]->dreg;
9390 MONO_ADD_INS (bblock, ins);
9392 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9393 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9397 inline_costs += 10 * num_calls++;
9402 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9412 /* somewhat similar to LDTOKEN */
9413 MonoInst *addr, *vtvar;
9414 CHECK_STACK_OVF (1);
9415 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9417 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9418 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9420 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9421 ins->type = STACK_VTYPE;
9422 ins->klass = mono_defaults.argumenthandle_class;
9435 * The following transforms:
9436 * CEE_CEQ into OP_CEQ
9437 * CEE_CGT into OP_CGT
9438 * CEE_CGT_UN into OP_CGT_UN
9439 * CEE_CLT into OP_CLT
9440 * CEE_CLT_UN into OP_CLT_UN
9442 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9444 MONO_INST_NEW (cfg, ins, cmp->opcode);
9446 cmp->sreg1 = sp [0]->dreg;
9447 cmp->sreg2 = sp [1]->dreg;
9448 type_from_op (cmp, sp [0], sp [1]);
9450 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9451 cmp->opcode = OP_LCOMPARE;
9452 else if (sp [0]->type == STACK_R8)
9453 cmp->opcode = OP_FCOMPARE;
9455 cmp->opcode = OP_ICOMPARE;
9456 MONO_ADD_INS (bblock, cmp);
9457 ins->type = STACK_I4;
9458 ins->dreg = alloc_dreg (cfg, ins->type);
9459 type_from_op (ins, sp [0], sp [1]);
9461 if (cmp->opcode == OP_FCOMPARE) {
9463 * The backends expect the fceq opcodes to do the
9466 cmp->opcode = OP_NOP;
9467 ins->sreg1 = cmp->sreg1;
9468 ins->sreg2 = cmp->sreg2;
9470 MONO_ADD_INS (bblock, ins);
9477 MonoMethod *cil_method;
9478 gboolean needs_static_rgctx_invoke;
9480 CHECK_STACK_OVF (1);
9482 n = read32 (ip + 2);
9483 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9486 mono_class_init (cmethod->klass);
9488 mono_save_token_info (cfg, image, n, cmethod);
9490 if (cfg->generic_sharing_context)
9491 context_used = mono_method_check_context_used (cmethod);
9493 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9495 cil_method = cmethod;
9496 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9497 METHOD_ACCESS_FAILURE;
9499 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9500 if (check_linkdemand (cfg, method, cmethod))
9502 CHECK_CFG_EXCEPTION;
9503 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9504 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9508 * Optimize the common case of ldftn+delegate creation
9510 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9511 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9512 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9514 int invoke_context_used = 0;
9516 invoke = mono_get_delegate_invoke (ctor_method->klass);
9517 if (!invoke || !mono_method_signature (invoke))
9520 if (cfg->generic_sharing_context)
9521 invoke_context_used = mono_method_check_context_used (invoke);
9523 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9524 /* FIXME: SGEN support */
9525 if (invoke_context_used == 0) {
9526 MonoInst *target_ins;
9529 if (cfg->verbose_level > 3)
9530 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9531 target_ins = sp [-1];
9533 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9534 CHECK_CFG_EXCEPTION;
9543 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9544 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9548 inline_costs += 10 * num_calls++;
9551 case CEE_LDVIRTFTN: {
9556 n = read32 (ip + 2);
9557 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9560 mono_class_init (cmethod->klass);
9562 if (cfg->generic_sharing_context)
9563 context_used = mono_method_check_context_used (cmethod);
9565 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9566 if (check_linkdemand (cfg, method, cmethod))
9568 CHECK_CFG_EXCEPTION;
9569 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9570 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9576 args [1] = emit_get_rgctx_method (cfg, context_used,
9577 cmethod, MONO_RGCTX_INFO_METHOD);
9580 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9582 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9585 inline_costs += 10 * num_calls++;
9589 CHECK_STACK_OVF (1);
9591 n = read16 (ip + 2);
9593 EMIT_NEW_ARGLOAD (cfg, ins, n);
9598 CHECK_STACK_OVF (1);
9600 n = read16 (ip + 2);
9602 NEW_ARGLOADA (cfg, ins, n);
9603 MONO_ADD_INS (cfg->cbb, ins);
9611 n = read16 (ip + 2);
9613 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9615 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9619 CHECK_STACK_OVF (1);
9621 n = read16 (ip + 2);
9623 EMIT_NEW_LOCLOAD (cfg, ins, n);
9628 unsigned char *tmp_ip;
9629 CHECK_STACK_OVF (1);
9631 n = read16 (ip + 2);
9634 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9640 EMIT_NEW_LOCLOADA (cfg, ins, n);
9649 n = read16 (ip + 2);
9651 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9653 emit_stloc_ir (cfg, sp, header, n);
9660 if (sp != stack_start)
9662 if (cfg->method != method)
9664 * Inlining this into a loop in a parent could lead to
9665 * stack overflows which is different behavior than the
9666 * non-inlined case, thus disable inlining in this case.
9668 goto inline_failure;
9670 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9671 ins->dreg = alloc_preg (cfg);
9672 ins->sreg1 = sp [0]->dreg;
9673 ins->type = STACK_PTR;
9674 MONO_ADD_INS (cfg->cbb, ins);
9676 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9678 ins->flags |= MONO_INST_INIT;
9683 case CEE_ENDFILTER: {
9684 MonoExceptionClause *clause, *nearest;
9685 int cc, nearest_num;
9689 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9691 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9692 ins->sreg1 = (*sp)->dreg;
9693 MONO_ADD_INS (bblock, ins);
9694 start_new_bblock = 1;
9699 for (cc = 0; cc < header->num_clauses; ++cc) {
9700 clause = &header->clauses [cc];
9701 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9702 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9703 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9709 if ((ip - header->code) != nearest->handler_offset)
9714 case CEE_UNALIGNED_:
9715 ins_flag |= MONO_INST_UNALIGNED;
9716 /* FIXME: record alignment? we can assume 1 for now */
9721 ins_flag |= MONO_INST_VOLATILE;
9725 ins_flag |= MONO_INST_TAILCALL;
9726 cfg->flags |= MONO_CFG_HAS_TAIL;
9727 /* Can't inline tail calls at this time */
9728 inline_costs += 100000;
9735 token = read32 (ip + 2);
9736 klass = mini_get_class (method, token, generic_context);
9737 CHECK_TYPELOAD (klass);
9738 if (generic_class_is_reference_type (cfg, klass))
9739 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9741 mini_emit_initobj (cfg, *sp, NULL, klass);
9745 case CEE_CONSTRAINED_:
9747 token = read32 (ip + 2);
9748 if (method->wrapper_type != MONO_WRAPPER_NONE)
9749 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9751 constrained_call = mono_class_get_full (image, token, generic_context);
9752 CHECK_TYPELOAD (constrained_call);
9757 MonoInst *iargs [3];
9761 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9762 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9763 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9764 /* emit_memset only works when val == 0 */
9765 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9770 if (ip [1] == CEE_CPBLK) {
9771 MonoMethod *memcpy_method = get_memcpy_method ();
9772 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9774 MonoMethod *memset_method = get_memset_method ();
9775 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9785 ins_flag |= MONO_INST_NOTYPECHECK;
9787 ins_flag |= MONO_INST_NORANGECHECK;
9788 /* we ignore the no-nullcheck for now since we
9789 * really do it explicitly only when doing callvirt->call
9795 int handler_offset = -1;
9797 for (i = 0; i < header->num_clauses; ++i) {
9798 MonoExceptionClause *clause = &header->clauses [i];
9799 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9800 handler_offset = clause->handler_offset;
9805 bblock->flags |= BB_EXCEPTION_UNSAFE;
9807 g_assert (handler_offset != -1);
9809 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9810 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9811 ins->sreg1 = load->dreg;
9812 MONO_ADD_INS (bblock, ins);
9814 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9815 MONO_ADD_INS (bblock, ins);
9818 link_bblock (cfg, bblock, end_bblock);
9819 start_new_bblock = 1;
9827 CHECK_STACK_OVF (1);
9829 token = read32 (ip + 2);
9830 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9831 MonoType *type = mono_type_create_from_typespec (image, token);
9832 token = mono_type_size (type, &ialign);
9834 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9835 CHECK_TYPELOAD (klass);
9836 mono_class_init (klass);
9837 token = mono_class_value_size (klass, &align);
9839 EMIT_NEW_ICONST (cfg, ins, token);
9844 case CEE_REFANYTYPE: {
9845 MonoInst *src_var, *src;
9851 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9853 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9854 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9855 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9873 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9883 g_warning ("opcode 0x%02x not handled", *ip);
9887 if (start_new_bblock != 1)
9890 bblock->cil_length = ip - bblock->cil_code;
9891 bblock->next_bb = end_bblock;
9893 if (cfg->method == method && cfg->domainvar) {
9895 MonoInst *get_domain;
9897 cfg->cbb = init_localsbb;
9899 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9900 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9903 get_domain->dreg = alloc_preg (cfg);
9904 MONO_ADD_INS (cfg->cbb, get_domain);
9906 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9907 MONO_ADD_INS (cfg->cbb, store);
9910 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9911 if (cfg->compile_aot)
9912 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9913 mono_get_got_var (cfg);
9916 if (cfg->method == method && cfg->got_var)
9917 mono_emit_load_got_addr (cfg);
9922 cfg->cbb = init_localsbb;
9924 for (i = 0; i < header->num_locals; ++i) {
9925 MonoType *ptype = header->locals [i];
9926 int t = ptype->type;
9927 dreg = cfg->locals [i]->dreg;
9929 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9930 t = mono_class_enum_basetype (ptype->data.klass)->type;
9932 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9933 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9934 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9935 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9936 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9937 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9938 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9939 ins->type = STACK_R8;
9940 ins->inst_p0 = (void*)&r8_0;
9941 ins->dreg = alloc_dreg (cfg, STACK_R8);
9942 MONO_ADD_INS (init_localsbb, ins);
9943 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9944 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9945 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9946 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9948 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9953 if (cfg->init_ref_vars && cfg->method == method) {
9954 /* Emit initialization for ref vars */
9955 // FIXME: Avoid duplication initialization for IL locals.
9956 for (i = 0; i < cfg->num_varinfo; ++i) {
9957 MonoInst *ins = cfg->varinfo [i];
9959 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9960 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9964 /* Add a sequence point for method entry/exit events */
9966 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9967 MONO_ADD_INS (init_localsbb, ins);
9968 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9969 MONO_ADD_INS (cfg->bb_exit, ins);
9974 if (cfg->method == method) {
9976 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9977 bb->region = mono_find_block_region (cfg, bb->real_offset);
9979 mono_create_spvar_for_region (cfg, bb->region);
9980 if (cfg->verbose_level > 2)
9981 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9985 g_slist_free (class_inits);
9986 dont_inline = g_list_remove (dont_inline, method);
9988 if (inline_costs < 0) {
9991 /* Method is too large */
9992 mname = mono_method_full_name (method, TRUE);
9993 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9994 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9996 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9997 mono_basic_block_free (original_bb);
10001 if ((cfg->verbose_level > 2) && (cfg->method == method))
10002 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10004 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10005 mono_basic_block_free (original_bb);
10006 return inline_costs;
10009 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10016 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10020 set_exception_type_from_invalid_il (cfg, method, ip);
10024 g_slist_free (class_inits);
10025 mono_basic_block_free (original_bb);
10026 dont_inline = g_list_remove (dont_inline, method);
10027 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode (OP_STORE*_MEMBASE_REG) to the
 * corresponding immediate-source form (OP_STORE*_MEMBASE_IMM), used when the
 * stored value is a known constant and can be folded into the store itself.
 * Opcodes with no immediate form abort via g_assert_not_reached () — callers
 * are expected to pass only the store opcodes handled here.
 */
10032 store_membase_reg_to_store_membase_imm (int opcode)
10035 case OP_STORE_MEMBASE_REG:
10036 return OP_STORE_MEMBASE_IMM;
10037 case OP_STOREI1_MEMBASE_REG:
10038 return OP_STOREI1_MEMBASE_IMM;
10039 case OP_STOREI2_MEMBASE_REG:
10040 return OP_STOREI2_MEMBASE_IMM;
10041 case OP_STOREI4_MEMBASE_REG:
10042 return OP_STOREI4_MEMBASE_IMM;
10043 case OP_STOREI8_MEMBASE_REG:
10044 return OP_STOREI8_MEMBASE_IMM;
/* no immediate counterpart exists for any other store opcode */
10046 g_assert_not_reached ();
10052 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an ALU/compare/store/call opcode taking a register second operand to
 * the variant taking an inline immediate (e.g. OP_IADD -> OP_IADD_IMM), so
 * constant operands can be folded into the instruction. Covers 32 bit ('I')
 * and 64 bit ('L') integer ops, compares, membase stores, and some
 * x86/amd64-specific forms. NOTE(review): the default branch is not visible
 * in this extract — presumably it returns -1 for opcodes with no immediate
 * form; confirm against the full file before relying on that.
 */
10055 mono_op_to_op_imm (int opcode)
10059 return OP_IADD_IMM;
10061 return OP_ISUB_IMM;
10063 return OP_IDIV_IMM;
10065 return OP_IDIV_UN_IMM;
10067 return OP_IREM_IMM;
10069 return OP_IREM_UN_IMM;
10071 return OP_IMUL_IMM;
10073 return OP_IAND_IMM;
10077 return OP_IXOR_IMM;
10079 return OP_ISHL_IMM;
10081 return OP_ISHR_IMM;
10083 return OP_ISHR_UN_IMM;
/* 64-bit long ops */
10086 return OP_LADD_IMM;
10088 return OP_LSUB_IMM;
10090 return OP_LAND_IMM;
10094 return OP_LXOR_IMM;
10096 return OP_LSHL_IMM;
10098 return OP_LSHR_IMM;
10100 return OP_LSHR_UN_IMM;
/* compares */
10103 return OP_COMPARE_IMM;
10105 return OP_ICOMPARE_IMM;
10107 return OP_LCOMPARE_IMM;
/* membase stores: fold the stored value into the instruction */
10109 case OP_STORE_MEMBASE_REG:
10110 return OP_STORE_MEMBASE_IMM;
10111 case OP_STOREI1_MEMBASE_REG:
10112 return OP_STOREI1_MEMBASE_IMM;
10113 case OP_STOREI2_MEMBASE_REG:
10114 return OP_STOREI2_MEMBASE_IMM;
10115 case OP_STOREI4_MEMBASE_REG:
10116 return OP_STOREI4_MEMBASE_IMM;
/* architecture specific opcodes */
10118 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10120 return OP_X86_PUSH_IMM;
10121 case OP_X86_COMPARE_MEMBASE_REG:
10122 return OP_X86_COMPARE_MEMBASE_IMM;
10124 #if defined(TARGET_AMD64)
10125 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10126 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* a call through a constant address becomes a direct call */
10128 case OP_VOIDCALL_REG:
10129 return OP_VOIDCALL;
10137 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL indirect-load opcode (CEE_LDIND_*) into the equivalent
 * machine-level base+offset load (OP_LOAD*_MEMBASE). CEE_LDIND_REF and the
 * native-int load both map to the pointer-sized OP_LOAD_MEMBASE. Any opcode
 * outside the LDIND family aborts via g_assert_not_reached ().
 */
10144 ldind_to_load_membase (int opcode)
10148 return OP_LOADI1_MEMBASE;
10150 return OP_LOADU1_MEMBASE;
10152 return OP_LOADI2_MEMBASE;
10154 return OP_LOADU2_MEMBASE;
10156 return OP_LOADI4_MEMBASE;
10158 return OP_LOADU4_MEMBASE;
10160 return OP_LOAD_MEMBASE;
10161 case CEE_LDIND_REF:
/* object references are pointer-sized loads, same as native int */
10162 return OP_LOAD_MEMBASE;
10164 return OP_LOADI8_MEMBASE;
10166 return OP_LOADR4_MEMBASE;
10168 return OP_LOADR8_MEMBASE;
10170 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL indirect-store opcode (CEE_STIND_*) into the equivalent
 * machine-level base+offset store (OP_STORE*_MEMBASE_REG). CEE_STIND_REF
 * maps to the pointer-sized OP_STORE_MEMBASE_REG. Any opcode outside the
 * STIND family aborts via g_assert_not_reached ().
 */
10177 stind_to_store_membase (int opcode)
10181 return OP_STOREI1_MEMBASE_REG;
10183 return OP_STOREI2_MEMBASE_REG;
10185 return OP_STOREI4_MEMBASE_REG;
10187 case CEE_STIND_REF:
/* object references are pointer-sized stores */
10188 return OP_STORE_MEMBASE_REG;
10190 return OP_STOREI8_MEMBASE_REG;
10192 return OP_STORER4_MEMBASE_REG;
10194 return OP_STORER8_MEMBASE_REG;
10196 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to an absolute-address load
 * (OP_LOAD*_MEM), for backends that can encode an absolute address directly.
 * Only enabled on x86/amd64; OP_LOADI8 only when registers are 64 bit.
 * NOTE(review): the non-x86 fallthrough/default is not visible in this
 * extract — presumably it returns -1; confirm against the full file.
 */
10203 mono_load_membase_to_load_mem (int opcode)
10205 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10206 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10208 case OP_LOAD_MEMBASE:
10209 return OP_LOAD_MEM;
10210 case OP_LOADU1_MEMBASE:
10211 return OP_LOADU1_MEM;
10212 case OP_LOADU2_MEMBASE:
10213 return OP_LOADU2_MEM;
10214 case OP_LOADI4_MEMBASE:
10215 return OP_LOADI4_MEM;
10216 case OP_LOADU4_MEMBASE:
10217 return OP_LOADU4_MEM;
10218 #if SIZEOF_REGISTER == 8
10219 case OP_LOADI8_MEMBASE:
10220 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is about to be stored to memory via
 * STORE_OPCODE, return a fused read-modify-write form that operates directly
 * on the memory location (x86/amd64 "op [mem], reg/imm" encodings), avoiding
 * a separate load+store. Used by mono_spill_global_vars when spilling.
 * The store width must match what the target can fuse: 32 bit stores on x86,
 * 32/64 bit stores on amd64 (see the guards below). NOTE(review): the
 * "not fusable" return (presumably -1, as tested by callers with != -1) is
 * not visible in this extract; confirm against the full file.
 */
10229 op_to_op_dest_membase (int store_opcode, int opcode)
10231 #if defined(TARGET_X86)
/* only pointer/int32-sized stores can be fused on x86 */
10232 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10237 return OP_X86_ADD_MEMBASE_REG;
10239 return OP_X86_SUB_MEMBASE_REG;
10241 return OP_X86_AND_MEMBASE_REG;
10243 return OP_X86_OR_MEMBASE_REG;
10245 return OP_X86_XOR_MEMBASE_REG;
10248 return OP_X86_ADD_MEMBASE_IMM;
10251 return OP_X86_SUB_MEMBASE_IMM;
10254 return OP_X86_AND_MEMBASE_IMM;
10257 return OP_X86_OR_MEMBASE_IMM;
10260 return OP_X86_XOR_MEMBASE_IMM;
10266 #if defined(TARGET_AMD64)
/* amd64 can additionally fuse 64 bit stores */
10267 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10272 return OP_X86_ADD_MEMBASE_REG;
10274 return OP_X86_SUB_MEMBASE_REG;
10276 return OP_X86_AND_MEMBASE_REG;
10278 return OP_X86_OR_MEMBASE_REG;
10280 return OP_X86_XOR_MEMBASE_REG;
10282 return OP_X86_ADD_MEMBASE_IMM;
10284 return OP_X86_SUB_MEMBASE_IMM;
10286 return OP_X86_AND_MEMBASE_IMM;
10288 return OP_X86_OR_MEMBASE_IMM;
10290 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
10292 return OP_AMD64_ADD_MEMBASE_REG;
10294 return OP_AMD64_SUB_MEMBASE_REG;
10296 return OP_AMD64_AND_MEMBASE_REG;
10298 return OP_AMD64_OR_MEMBASE_REG;
10300 return OP_AMD64_XOR_MEMBASE_REG;
10303 return OP_AMD64_ADD_MEMBASE_IMM;
10306 return OP_AMD64_SUB_MEMBASE_IMM;
10309 return OP_AMD64_AND_MEMBASE_IMM;
10312 return OP_AMD64_OR_MEMBASE_IMM;
10315 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-setting opcode with the byte store that consumes it,
 * producing an x86/amd64 SETcc-to-memory form (e.g. sete [mem]) instead of
 * a SETcc-to-register followed by a separate store. Only a 1 byte store
 * (OP_STOREI1_MEMBASE_REG) can be fused, since SETcc writes a single byte.
 */
10325 op_to_op_store_membase (int store_opcode, int opcode)
10327 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10330 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10331 return OP_X86_SETEQ_MEMBASE;
10333 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10334 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand was just loaded from memory
 * with LOAD_OPCODE, return a fused form that reads the operand directly from
 * memory (x86/amd64 "op reg, [mem]" / "cmp [mem], ..." encodings), avoiding
 * the explicit load. The load width must be compatible with the operation
 * (see the load_opcode guards). NOTE(review): the "not fusable" return
 * (presumably -1, as callers test against -1) is not visible in this extract.
 */
10342 op_to_op_src1_membase (int load_opcode, int opcode)
10345 /* FIXME: This has sign extension issues */
/* special case: byte compare against an immediate */
10347 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10348 return OP_X86_COMPARE_MEMBASE8_IMM;
/* on x86 only pointer/int32-sized loads can be fused */
10351 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10356 return OP_X86_PUSH_MEMBASE;
10357 case OP_COMPARE_IMM:
10358 case OP_ICOMPARE_IMM:
10359 return OP_X86_COMPARE_MEMBASE_IMM;
10362 return OP_X86_COMPARE_MEMBASE_REG;
10366 #ifdef TARGET_AMD64
10367 /* FIXME: This has sign extension issues */
10369 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10370 return OP_X86_COMPARE_MEMBASE8_IMM;
10375 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10376 return OP_X86_PUSH_MEMBASE;
/* the block below is commented out in the original source (32 bit
 * immediate limitation) — kept verbatim: */
10378 /* FIXME: This only works for 32 bit immediates
10379 case OP_COMPARE_IMM:
10380 case OP_LCOMPARE_IMM:
10381 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10382 return OP_AMD64_COMPARE_MEMBASE_IMM;
10384 case OP_ICOMPARE_IMM:
10385 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10386 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10390 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10391 return OP_AMD64_COMPARE_MEMBASE_REG;
10394 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10395 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode whose second source operand was just loaded from memory
 * with LOAD_OPCODE, return a fused form reading that operand directly from
 * memory ("op reg, [mem]" encodings). 32 bit ops fuse 32 bit loads, 64 bit
 * (AMD64_*) ops fuse pointer/64 bit loads — see the guards on each case.
 * NOTE(review): the "not fusable" return (presumably -1, as callers test
 * against -1) is not visible in this extract; confirm against the full file.
 */
10404 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32-sized loads can be fused */
10407 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10413 return OP_X86_COMPARE_REG_MEMBASE;
10415 return OP_X86_ADD_REG_MEMBASE;
10417 return OP_X86_SUB_REG_MEMBASE;
10419 return OP_X86_AND_REG_MEMBASE;
10421 return OP_X86_OR_REG_MEMBASE;
10423 return OP_X86_XOR_REG_MEMBASE;
10427 #ifdef TARGET_AMD64
10430 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10431 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10435 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10436 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32 bit ALU ops fuse only 32 bit loads */
10439 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10440 return OP_X86_ADD_REG_MEMBASE;
10442 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10443 return OP_X86_SUB_REG_MEMBASE;
10445 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10446 return OP_X86_AND_REG_MEMBASE;
10448 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10449 return OP_X86_OR_REG_MEMBASE;
10451 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10452 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit ALU ops fuse pointer/64 bit loads */
10454 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10455 return OP_AMD64_ADD_REG_MEMBASE;
10457 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10458 return OP_AMD64_SUB_REG_MEMBASE;
10460 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10461 return OP_AMD64_AND_REG_MEMBASE;
10463 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10464 return OP_AMD64_OR_REG_MEMBASE;
10466 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10467 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to produce an immediate form for
 * opcodes the current architecture emulates in software (long shifts on
 * 32 bit targets without native support, mul/div where emulated), since the
 * emulation helpers take register operands. The opcodes excluded by the
 * #if blocks are elided in this extract; the remainder delegates to
 * mono_op_to_op_imm ().
 */
10475 mono_op_to_op_imm_noemul (int opcode)
10478 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10484 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10492 return mono_op_to_op_imm (opcode);
10496 #ifndef DISABLE_JIT
10499 * mono_handle_global_vregs:
10501 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass overview: (1) walk every instruction in every bblock recording, per
 * vreg, the single bblock that uses it (or -1 once a second bblock is seen,
 * at which point a MonoInst variable is created for it); (2) demote variables
 * used in only one bblock back to plain local vregs; (3) compact the
 * varinfo/vars tables, dropping entries marked MONO_INST_IS_DEAD.
 */
10505 mono_handle_global_vregs (MonoCompile *cfg)
10507 gint32 *vreg_to_bb;
10508 MonoBasicBlock *bb;
/* NOTE(review): element type is gint32 but the allocation is sized with
 * sizeof (gint32*) — harmless over-allocation on LP64, but presumably
 * sizeof (gint32) was intended; also the "+ 1" binds after the multiply.
 * Confirm and fix in the full file. */
10511 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10513 #ifdef MONO_ARCH_SIMD_INTRINSICS
10514 if (cfg->uses_simd_intrinsics)
10515 mono_simd_simplify_indirection (cfg);
10518 /* Find local vregs used in more than one bb */
10519 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10520 MonoInst *ins = bb->code;
10521 int block_num = bb->block_num;
10523 if (cfg->verbose_level > 2)
10524 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10527 for (; ins; ins = ins->next) {
10528 const char *spec = INS_INFO (ins->opcode);
10529 int regtype = 0, regindex;
10532 if (G_UNLIKELY (cfg->verbose_level > 2))
10533 mono_print_ins (ins);
/* by this point all CIL-level opcodes must have been lowered */
10535 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three possible source registers */
10537 for (regindex = 0; regindex < 4; regindex ++) {
10540 if (regindex == 0) {
10541 regtype = spec [MONO_INST_DEST];
10542 if (regtype == ' ')
10545 } else if (regindex == 1) {
10546 regtype = spec [MONO_INST_SRC1];
10547 if (regtype == ' ')
10550 } else if (regindex == 2) {
10551 regtype = spec [MONO_INST_SRC2];
10552 if (regtype == ' ')
10555 } else if (regindex == 3) {
10556 regtype = spec [MONO_INST_SRC3];
10557 if (regtype == ' ')
10562 #if SIZEOF_REGISTER == 4
10563 /* In the LLVM case, the long opcodes are not decomposed */
10564 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10566 * Since some instructions reference the original long vreg,
10567 * and some reference the two component vregs, it is quite hard
10568 * to determine when it needs to be global. So be conservative.
10570 if (!get_vreg_to_inst (cfg, vreg)) {
10571 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10573 if (cfg->verbose_level > 2)
10574 printf ("LONG VREG R%d made global.\n", vreg);
10578 * Make the component vregs volatile since the optimizations can
10579 * get confused otherwise.
10581 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10582 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10586 g_assert (vreg != -1);
10588 prev_bb = vreg_to_bb [vreg];
10589 if (prev_bb == 0) {
10590 /* 0 is a valid block num */
10591 vreg_to_bb [vreg] = block_num + 1;
10592 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global; skip them */
10593 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10596 if (!get_vreg_to_inst (cfg, vreg)) {
10597 if (G_UNLIKELY (cfg->verbose_level > 2))
10598 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable of the matching type for the vreg */
10602 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10605 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10608 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10611 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10614 g_assert_not_reached ();
10618 /* Flag as having been used in more than one bb */
10619 vreg_to_bb [vreg] = -1;
10625 /* If a variable is used in only one bblock, convert it into a local vreg */
10626 for (i = 0; i < cfg->num_varinfo; i++) {
10627 MonoInst *var = cfg->varinfo [i];
10628 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10630 switch (var->type) {
10636 #if SIZEOF_REGISTER == 8
10639 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10640 /* Enabling this screws up the fp stack on x86 */
10643 /* Arguments are implicitly global */
10644 /* Putting R4 vars into registers doesn't work currently */
10645 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10647 * Make that the variable's liveness interval doesn't contain a call, since
10648 * that would cause the lvreg to be spilled, making the whole optimization
10651 /* This is too slow for JIT compilation */
10653 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10655 int def_index, call_index, ins_index;
10656 gboolean spilled = FALSE;
10661 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10662 const char *spec = INS_INFO (ins->opcode);
10664 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10665 def_index = ins_index;
/* NOTE(review): both clauses below test SRC1/sreg1 — the second
 * presumably should test SRC2/sreg2; looks like a copy-paste bug,
 * so uses via sreg2 are never checked against calls. Verify and
 * fix in the full file. */
10667 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10668 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10669 if (call_index > def_index) {
10675 if (MONO_IS_CALL (ins))
10676 call_index = ins_index;
10686 if (G_UNLIKELY (cfg->verbose_level > 2))
10687 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10688 var->flags |= MONO_INST_IS_DEAD;
10689 cfg->vreg_to_inst [var->dreg] = NULL;
10696 * Compress the varinfo and vars tables so the liveness computation is faster and
10697 * takes up less space.
10700 for (i = 0; i < cfg->num_varinfo; ++i) {
10701 MonoInst *var = cfg->varinfo [i];
10702 if (pos < i && cfg->locals_start == i)
10703 cfg->locals_start = pos;
10704 if (!(var->flags & MONO_INST_IS_DEAD)) {
10706 cfg->varinfo [pos] = cfg->varinfo [i];
10707 cfg->varinfo [pos]->inst_c0 = pos;
10708 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10709 cfg->vars [pos].idx = pos;
10710 #if SIZEOF_REGISTER == 4
10711 if (cfg->varinfo [pos]->type == STACK_I8) {
10712 /* Modify the two component vars too */
10715 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10716 var1->inst_c0 = pos;
10717 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10718 var1->inst_c0 = pos;
10725 cfg->num_varinfo = pos;
10726 if (cfg->locals_start > cfg->num_varinfo)
10727 cfg->locals_start = cfg->num_varinfo;
10731 * mono_spill_global_vars:
10733 * Generate spill code for variables which are not allocated to registers,
10734 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10735 * code is generated which could be optimized by the local optimization passes.
10738 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10740 MonoBasicBlock *bb;
10742 int orig_next_vreg;
10743 guint32 *vreg_to_lvreg;
10745 guint32 i, lvregs_len;
10746 gboolean dest_has_lvreg = FALSE;
10747 guint32 stacktypes [128];
10748 MonoInst **live_range_start, **live_range_end;
10749 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10751 *need_local_opts = FALSE;
10753 memset (spec2, 0, sizeof (spec2));
10755 /* FIXME: Move this function to mini.c */
10756 stacktypes ['i'] = STACK_PTR;
10757 stacktypes ['l'] = STACK_I8;
10758 stacktypes ['f'] = STACK_R8;
10759 #ifdef MONO_ARCH_SIMD_INTRINSICS
10760 stacktypes ['x'] = STACK_VTYPE;
10763 #if SIZEOF_REGISTER == 4
10764 /* Create MonoInsts for longs */
10765 for (i = 0; i < cfg->num_varinfo; i++) {
10766 MonoInst *ins = cfg->varinfo [i];
10768 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10769 switch (ins->type) {
10774 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10777 g_assert (ins->opcode == OP_REGOFFSET);
10779 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10781 tree->opcode = OP_REGOFFSET;
10782 tree->inst_basereg = ins->inst_basereg;
10783 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10785 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10787 tree->opcode = OP_REGOFFSET;
10788 tree->inst_basereg = ins->inst_basereg;
10789 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10799 /* FIXME: widening and truncation */
10802 * As an optimization, when a variable allocated to the stack is first loaded into
10803 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10804 * the variable again.
10806 orig_next_vreg = cfg->next_vreg;
10807 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10808 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10812 * These arrays contain the first and last instructions accessing a given
10814 * Since we emit bblocks in the same order we process them here, and we
10815 * don't split live ranges, these will precisely describe the live range of
10816 * the variable, i.e. the instruction range where a valid value can be found
10817 * in the variables location.
10818 * The live range is computed using the liveness info computed by the liveness pass.
10819 * We can't use vmv->range, since that is an abstract live range, and we need
10820 * one which is instruction precise.
10821 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10823 /* FIXME: Only do this if debugging info is requested */
10824 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10825 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10826 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10827 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10829 /* Add spill loads/stores */
10830 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10833 if (cfg->verbose_level > 2)
10834 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10836 /* Clear vreg_to_lvreg array */
10837 for (i = 0; i < lvregs_len; i++)
10838 vreg_to_lvreg [lvregs [i]] = 0;
10842 MONO_BB_FOR_EACH_INS (bb, ins) {
10843 const char *spec = INS_INFO (ins->opcode);
10844 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10845 gboolean store, no_lvreg;
10846 int sregs [MONO_MAX_SRC_REGS];
10848 if (G_UNLIKELY (cfg->verbose_level > 2))
10849 mono_print_ins (ins);
10851 if (ins->opcode == OP_NOP)
10855 * We handle LDADDR here as well, since it can only be decomposed
10856 * when variable addresses are known.
10858 if (ins->opcode == OP_LDADDR) {
10859 MonoInst *var = ins->inst_p0;
10861 if (var->opcode == OP_VTARG_ADDR) {
10862 /* Happens on SPARC/S390 where vtypes are passed by reference */
10863 MonoInst *vtaddr = var->inst_left;
10864 if (vtaddr->opcode == OP_REGVAR) {
10865 ins->opcode = OP_MOVE;
10866 ins->sreg1 = vtaddr->dreg;
10868 else if (var->inst_left->opcode == OP_REGOFFSET) {
10869 ins->opcode = OP_LOAD_MEMBASE;
10870 ins->inst_basereg = vtaddr->inst_basereg;
10871 ins->inst_offset = vtaddr->inst_offset;
10875 g_assert (var->opcode == OP_REGOFFSET);
10877 ins->opcode = OP_ADD_IMM;
10878 ins->sreg1 = var->inst_basereg;
10879 ins->inst_imm = var->inst_offset;
10882 *need_local_opts = TRUE;
10883 spec = INS_INFO (ins->opcode);
10886 if (ins->opcode < MONO_CEE_LAST) {
10887 mono_print_ins (ins);
10888 g_assert_not_reached ();
10892 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10896 if (MONO_IS_STORE_MEMBASE (ins)) {
10897 tmp_reg = ins->dreg;
10898 ins->dreg = ins->sreg2;
10899 ins->sreg2 = tmp_reg;
10902 spec2 [MONO_INST_DEST] = ' ';
10903 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10904 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10905 spec2 [MONO_INST_SRC3] = ' ';
10907 } else if (MONO_IS_STORE_MEMINDEX (ins))
10908 g_assert_not_reached ();
10913 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10914 printf ("\t %.3s %d", spec, ins->dreg);
10915 num_sregs = mono_inst_get_src_registers (ins, sregs);
10916 for (srcindex = 0; srcindex < 3; ++srcindex)
10917 printf (" %d", sregs [srcindex]);
10924 regtype = spec [MONO_INST_DEST];
10925 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10928 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10929 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10930 MonoInst *store_ins;
10932 MonoInst *def_ins = ins;
10933 int dreg = ins->dreg; /* The original vreg */
10935 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10937 if (var->opcode == OP_REGVAR) {
10938 ins->dreg = var->dreg;
10939 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10941 * Instead of emitting a load+store, use a _membase opcode.
10943 g_assert (var->opcode == OP_REGOFFSET);
10944 if (ins->opcode == OP_MOVE) {
10948 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10949 ins->inst_basereg = var->inst_basereg;
10950 ins->inst_offset = var->inst_offset;
10953 spec = INS_INFO (ins->opcode);
10957 g_assert (var->opcode == OP_REGOFFSET);
10959 prev_dreg = ins->dreg;
10961 /* Invalidate any previous lvreg for this vreg */
10962 vreg_to_lvreg [ins->dreg] = 0;
10966 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10968 store_opcode = OP_STOREI8_MEMBASE_REG;
10971 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10973 if (regtype == 'l') {
10974 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10975 mono_bblock_insert_after_ins (bb, ins, store_ins);
10976 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10977 mono_bblock_insert_after_ins (bb, ins, store_ins);
10978 def_ins = store_ins;
10981 g_assert (store_opcode != OP_STOREV_MEMBASE);
10983 /* Try to fuse the store into the instruction itself */
10984 /* FIXME: Add more instructions */
10985 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10986 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10987 ins->inst_imm = ins->inst_c0;
10988 ins->inst_destbasereg = var->inst_basereg;
10989 ins->inst_offset = var->inst_offset;
10990 spec = INS_INFO (ins->opcode);
10991 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10992 ins->opcode = store_opcode;
10993 ins->inst_destbasereg = var->inst_basereg;
10994 ins->inst_offset = var->inst_offset;
10998 tmp_reg = ins->dreg;
10999 ins->dreg = ins->sreg2;
11000 ins->sreg2 = tmp_reg;
11003 spec2 [MONO_INST_DEST] = ' ';
11004 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11005 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11006 spec2 [MONO_INST_SRC3] = ' ';
11008 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11009 // FIXME: The backends expect the base reg to be in inst_basereg
11010 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11012 ins->inst_basereg = var->inst_basereg;
11013 ins->inst_offset = var->inst_offset;
11014 spec = INS_INFO (ins->opcode);
11016 /* printf ("INS: "); mono_print_ins (ins); */
11017 /* Create a store instruction */
11018 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11020 /* Insert it after the instruction */
11021 mono_bblock_insert_after_ins (bb, ins, store_ins);
11023 def_ins = store_ins;
11026 * We can't assign ins->dreg to var->dreg here, since the
11027 * sregs could use it. So set a flag, and do it after
11030 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11031 dest_has_lvreg = TRUE;
11036 if (def_ins && !live_range_start [dreg]) {
11037 live_range_start [dreg] = def_ins;
11038 live_range_start_bb [dreg] = bb;
11045 num_sregs = mono_inst_get_src_registers (ins, sregs);
11046 for (srcindex = 0; srcindex < 3; ++srcindex) {
11047 regtype = spec [MONO_INST_SRC1 + srcindex];
11048 sreg = sregs [srcindex];
11050 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11051 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11052 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11053 MonoInst *use_ins = ins;
11054 MonoInst *load_ins;
11055 guint32 load_opcode;
11057 if (var->opcode == OP_REGVAR) {
11058 sregs [srcindex] = var->dreg;
11059 //mono_inst_set_src_registers (ins, sregs);
11060 live_range_end [sreg] = use_ins;
11061 live_range_end_bb [sreg] = bb;
11065 g_assert (var->opcode == OP_REGOFFSET);
11067 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11069 g_assert (load_opcode != OP_LOADV_MEMBASE);
11071 if (vreg_to_lvreg [sreg]) {
11072 g_assert (vreg_to_lvreg [sreg] != -1);
11074 /* The variable is already loaded to an lvreg */
11075 if (G_UNLIKELY (cfg->verbose_level > 2))
11076 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11077 sregs [srcindex] = vreg_to_lvreg [sreg];
11078 //mono_inst_set_src_registers (ins, sregs);
11082 /* Try to fuse the load into the instruction */
11083 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11084 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11085 sregs [0] = var->inst_basereg;
11086 //mono_inst_set_src_registers (ins, sregs);
11087 ins->inst_offset = var->inst_offset;
11088 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11089 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11090 sregs [1] = var->inst_basereg;
11091 //mono_inst_set_src_registers (ins, sregs);
11092 ins->inst_offset = var->inst_offset;
11094 if (MONO_IS_REAL_MOVE (ins)) {
11095 ins->opcode = OP_NOP;
11098 //printf ("%d ", srcindex); mono_print_ins (ins);
11100 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11102 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11103 if (var->dreg == prev_dreg) {
11105 * sreg refers to the value loaded by the load
11106 * emitted below, but we need to use ins->dreg
11107 * since it refers to the store emitted earlier.
11111 g_assert (sreg != -1);
11112 vreg_to_lvreg [var->dreg] = sreg;
11113 g_assert (lvregs_len < 1024);
11114 lvregs [lvregs_len ++] = var->dreg;
11118 sregs [srcindex] = sreg;
11119 //mono_inst_set_src_registers (ins, sregs);
11121 if (regtype == 'l') {
11122 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11123 mono_bblock_insert_before_ins (bb, ins, load_ins);
11124 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11125 mono_bblock_insert_before_ins (bb, ins, load_ins);
11126 use_ins = load_ins;
11129 #if SIZEOF_REGISTER == 4
11130 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11132 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11133 mono_bblock_insert_before_ins (bb, ins, load_ins);
11134 use_ins = load_ins;
11138 if (var->dreg < orig_next_vreg) {
11139 live_range_end [var->dreg] = use_ins;
11140 live_range_end_bb [var->dreg] = bb;
11144 mono_inst_set_src_registers (ins, sregs);
11146 if (dest_has_lvreg) {
11147 g_assert (ins->dreg != -1);
11148 vreg_to_lvreg [prev_dreg] = ins->dreg;
11149 g_assert (lvregs_len < 1024);
11150 lvregs [lvregs_len ++] = prev_dreg;
11151 dest_has_lvreg = FALSE;
11155 tmp_reg = ins->dreg;
11156 ins->dreg = ins->sreg2;
11157 ins->sreg2 = tmp_reg;
11160 if (MONO_IS_CALL (ins)) {
11161 /* Clear vreg_to_lvreg array */
11162 for (i = 0; i < lvregs_len; i++)
11163 vreg_to_lvreg [lvregs [i]] = 0;
11165 } else if (ins->opcode == OP_NOP) {
11167 MONO_INST_NULLIFY_SREGS (ins);
11170 if (cfg->verbose_level > 2)
11171 mono_print_ins_index (1, ins);
11174 /* Extend the live range based on the liveness info */
11175 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11176 for (i = 0; i < cfg->num_varinfo; i ++) {
11177 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11179 if (vreg_is_volatile (cfg, vi->vreg))
11180 /* The liveness info is incomplete */
11183 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11184 /* Live from at least the first ins of this bb */
11185 live_range_start [vi->vreg] = bb->code;
11186 live_range_start_bb [vi->vreg] = bb;
11189 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11190 /* Live at least until the last ins of this bb */
11191 live_range_end [vi->vreg] = bb->last_ins;
11192 live_range_end_bb [vi->vreg] = bb;
11198 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11200 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11201 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11203 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11204 for (i = 0; i < cfg->num_varinfo; ++i) {
11205 int vreg = MONO_VARINFO (cfg, i)->vreg;
11208 if (live_range_start [vreg]) {
11209 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11211 ins->inst_c1 = vreg;
11212 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11214 if (live_range_end [vreg]) {
11215 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11217 ins->inst_c1 = vreg;
11218 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11219 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11221 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11227 g_free (live_range_start);
11228 g_free (live_range_end);
11229 g_free (live_range_start_bb);
11230 g_free (live_range_end_bb);
11235 * - use 'iadd' instead of 'int_add'
11236 * - handling ovf opcodes: decompose in method_to_ir.
11237 * - unify iregs/fregs
11238 * -> partly done, the missing parts are:
11239 * - a more complete unification would involve unifying the hregs as well, so
11240 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11241 * would no longer map to the machine hregs, so the code generators would need to
11242 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11243 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11244 * fp/non-fp branches speeds it up by about 15%.
11245 * - use sext/zext opcodes instead of shifts
11247 * - get rid of TEMPLOADs if possible and use vregs instead
11248 * - clean up usage of OP_P/OP_ opcodes
11249 * - cleanup usage of DUMMY_USE
11250 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11252 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11253 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11254 * - make sure handle_stack_args () is called before the branch is emitted
11255 * - when the new IR is done, get rid of all unused stuff
11256 * - COMPARE/BEQ as separate instructions or unify them ?
11257 * - keeping them separate allows specialized compare instructions like
11258 * compare_imm, compare_membase
11259 * - most back ends unify fp compare+branch, fp compare+ceq
11260 * - integrate mono_save_args into inline_method
11261 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11262 * - handle long shift opts on 32 bit platforms somehow: they require
11263 * 3 sregs (2 for arg1 and 1 for arg2)
11264 * - make byref a 'normal' type.
11265 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11266 * variable if needed.
11267 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11268 * like inline_method.
11269 * - remove inlining restrictions
11270 * - fix LNEG and enable cfold of INEG
11271 * - generalize x86 optimizations like ldelema as a peephole optimization
11272 * - add store_mem_imm for amd64
11273 * - optimize the loading of the interruption flag in the managed->native wrappers
11274 * - avoid special handling of OP_NOP in passes
11275 * - move code inserting instructions into one function/macro.
11276 * - try a coalescing phase after liveness analysis
11277 * - add float -> vreg conversion + local optimizations on !x86
11278 * - figure out how to handle decomposed branches during optimizations, ie.
11279 * compare+branch, op_jump_table+op_br etc.
11280 * - promote RuntimeXHandles to vregs
11281 * - vtype cleanups:
11282 * - add a NEW_VARLOADA_VREG macro
11283 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11284 * accessing vtype fields.
11285 * - get rid of I8CONST on 64 bit platforms
11286 * - dealing with the increase in code size due to branches created during opcode
11288 * - use extended basic blocks
11289 * - all parts of the JIT
11290 * - handle_global_vregs () && local regalloc
11291 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11292 * - sources of increase in code size:
11295 * - isinst and castclass
11296 * - lvregs not allocated to global registers even if used multiple times
11297 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11299 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11300 * - add all micro optimizations from the old JIT
11301 * - put tree optimizations into the deadce pass
11302 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11303 * specific function.
11304 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11305 * fcompare + branchCC.
11306 * - create a helper function for allocating a stack slot, taking into account
11307 * MONO_CFG_HAS_SPILLUP.
11309 * - merge the ia64 switch changes.
11310 * - optimize mono_regstate2_alloc_int/float.
11311 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11312 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11313 * parts of the tree could be separated by other instructions, killing the tree
11314 * arguments, or stores killing loads etc. Also, should we fold loads into other
11315 * instructions if the result of the load is used multiple times ?
11316 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11317 * - LAST MERGE: 108395.
11318 * - when returning vtypes in registers, generate IR and append it to the end of the
11319 * last bb instead of doing it in the epilog.
11320 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11328 - When to decompose opcodes:
11329 - earlier: this makes some optimizations hard to implement, since the low level IR
 10330 no longer contains the necessary information. But it is easier to do.
11331 - later: harder to implement, enables more optimizations.
11332 - Branches inside bblocks:
11333 - created when decomposing complex opcodes.
11334 - branches to another bblock: harmless, but not tracked by the branch
11335 optimizations, so need to branch to a label at the start of the bblock.
11336 - branches to inside the same bblock: very problematic, trips up the local
 11337 reg allocator. Can be fixed by splitting the current bblock, but that is a
11338 complex operation, since some local vregs can become global vregs etc.
11339 - Local/global vregs:
11340 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11341 local register allocator.
11342 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11343 structure, created by mono_create_var (). Assigned to hregs or the stack by
11344 the global register allocator.
11345 - When to do optimizations like alu->alu_imm:
11346 - earlier -> saves work later on since the IR will be smaller/simpler
11347 - later -> can work on more instructions
11348 - Handling of valuetypes:
11349 - When a vtype is pushed on the stack, a new temporary is created, an
11350 instruction computing its address (LDADDR) is emitted and pushed on
11351 the stack. Need to optimize cases when the vtype is used immediately as in
11352 argument passing, stloc etc.
11353 - Instead of the to_end stuff in the old JIT, simply call the function handling
11354 the values on the stack before emitting the last instruction of the bb.
11357 #endif /* DISABLE_JIT */