2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* NOTE(review): this extract has original line numbers fused into the text and
 * is missing lines (numbering is non-contiguous); several macro bodies below
 * lack their closing "} while (0)" in this view. */
/* Heuristic tuning constants for the inliner. */
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee (taken when cfg->method != method,
 * i.e. while compiling an inlined body of a non-wrapper method). */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation if a pending exception was recorded on the cfg. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a method-access failure on the cfg (type + formatted message) and
 * jump to the shared exception_exit label; frees its temporary name strings. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE, but for an inaccessible field. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Mark generic sharing as failed for this method (only relevant when a
 * generic_sharing_context is active) and jump to the exception exit. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-form opcode mappings (defined elsewhere in the JIT). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Trampoline signatures, defined in another translation unit. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/* NOTE(review): lines are elided here; the table declarations that these
 * MINI_OP/MINI_OP3 expansions populate are only partially visible. */
124 * Instruction metadata
/* First expansion: emit dest/src1/src2(/src3) spec characters per opcode. */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: count source registers used by each opcode. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness info to "not yet used". */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set all three source registers of 'ins' from the regs[] array. */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrappers around the vreg allocators: integer, float, pointer-sized,
 * and stack-type-driven destination registers respectively. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/* Map a MonoType to the opcode used to move it between registers.
 * NOTE(review): the return statements for each case are elided in this
 * extract; only the case labels and type normalization steps are visible. */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled via the container class' byval type. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's in/out edges and its instructions. */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method unverified; optionally break into the debugger first. */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the bblock starting at 'ip'. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA (base + index<<shift + imm) instruction. */
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64 bit targets, widen a 32 bit operand (sign-extend) when mixed with a
 * pointer-sized one; a no-op on 32 bit targets. */
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit a typed binary op, push the (decomposed) result. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit a typed unary op, push the (decomposed) result. */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring up the true/false target
 * bblocks and spilling any remaining eval-stack items at the block boundary. */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * Links two basic blocks in the control flow graph: the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * that control flow reaches after 'from'.
/* NOTE(review): interior lines (edge-exists early returns, array copy tails)
 * are elided in this extract; the duplicate-edge checks are only partial. */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added. */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip the out-edge if it already exists. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool-allocated, so no free needed). */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the same logic for to->in_bb. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () helper. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
/* Scan all EH clauses; the region token is ((clause index + 1) << 8)
 * combined with a MONO_REGION_* kind and the raw clause flags. */
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Filter blocks live between filter_offset and handler_offset. */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Otherwise it is a catch handler. */
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offset inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/* Collect the EH clauses of kind 'type' that a branch from 'ip' to 'target'
 * would leave (ip inside the clause, target outside); returned as a GList. */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer spill variable for an EH region.
 * Cached in the cfg->spvars hash, keyed by region id. */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for an IL offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for an IL offset;
 * cached in cfg->exvars, keyed by offset. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* NOTE(review): break/return lines between cases are elided in this extract. */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref types are managed pointers on the eval stack. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
/* Type variables only appear here under generic sharing. */
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [lhs][rhs]; STACK_INV marks
 * an invalid operand combination. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of negation per operand stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor, etc.). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero values encode allowed (and
 * conditionally-allowed) operand pairings. */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops, indexed [value][shift-amount]. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CEE_* opcode, indexed
 * by the result's stack type. */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements between the visible
 * lines are elided in this extract. */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Numeric binops: validate operands, then specialize the opcode by type. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width/kind. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with [src1][src1] here, unlike the
 * [src1][src2] pattern above — presumably intentional for the IMM form
 * (compare against an immediate of the same type); verify upstream. */
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare. */
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Conversions to 4-byte integers. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer -> R8. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions. */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned conversion, width-dependent. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 8-byte integers. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: result type is fixed by the load width. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a generic object klass. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
/* Stack type produced by each ldind.* variant (indexed by opcode offset). */
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the instructions in 'args' are compatible with the parameter
 * types of 'sig'. NOTE(review): return statements and several cases are
 * elided in this extract. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* R8 on the stack is only valid for R4/R8 (non-byref) parameters. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created and cached on the cfg. */
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on architectures that define MONO_ARCH_NEED_GOT_VAR and
 * when compiling AOT; lazily created otherwise like domainvar above. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Variable holding the runtime generic context (rgctx); requires an active
 * generic sharing context. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a representative MonoType. */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP: use the klass' this_arg (managed-pointer form). */
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: map a MonoType to its eval-stack type.
 * NOTE(review): the return statements for each case group are elided. */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/* Map a ldelem.*/stelem.* opcode to the element MonoClass it accesses.
 * NOTE(review): the case labels for most returns are elided in this extract. */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a local variable for eval-stack slot 'slot' holding a value of
 * ins->type, reusing a previously created one (via cfg->intvars) when the
 * slot/type pair matches. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: (type - 1) + slot * STACK_MAX. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable types: always create a fresh variable. */
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so the AOT
 * compiler can later resolve the item from metadata alone. */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): several continue/closing-brace lines are elided in this
 * extract; the visible control flow is incomplete. */
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick or allocate its out_stack variables. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's existing in_stack. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpose, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack as the in_stack of each successor,
 * flagging a merge with mismatched stack depth as unverifiable. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
/* Spill the current eval stack into the shared locals. */
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR which loads into "intf_reg" the interface-offsets entry for
 * @klass; the table lives at negative offsets from the vtable pointer held
 * in "vtable_reg".  Under AOT the interface id is not a compile-time
 * constant, so it is materialized via a MONO_PATCH_INFO_ADJUSTED_IID patch
 * and added to the vtable pointer instead of being folded into the load.
 * NOTE(review): this extract is line-sampled; intervening original lines
 * (braces, else) are not shown.
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the iid is constant, so entry i sits at -(i + 1) * sizeof (void*) */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR which sets "intf_bit_reg" to a nonzero value iff the bit for
 * @klass's interface id is set in the interface bitmap located at
 * base_reg + offset.  With COMPRESSED_INTERFACE_BITMAP the test is delegated
 * to the mono_class_interface_match icall; otherwise the byte holding the
 * bit is loaded and masked directly (iid >> 3 selects the byte, iid & 7 the
 * bit inside it).
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
/* under AOT the iid is only known at load time, so go through a patch */
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: compute the byte address and the bit mask at run time from the patched iid */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: iid is constant, fold the byte index and bit mask at compile time */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: tests the interface bitmap embedded in MonoClass. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: tests the interface bitmap embedded in MonoVTable. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is smaller
1439 * than the value given by max_iid_reg.
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: iid comes from a patch, so the comparison cannot use an immediate */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* with a false_target we branch (isinst); without one we throw (castclass) */
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable->max_interface_id (unsigned 16 bit) and delegate */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass->max_interface_id (unsigned 16 bit) and delegate */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subtype test: check whether the class in "klass_reg"
 * has @klass among its supertypes, branching to true_target on a match and
 * to false_target when the inheritance depth is too small.  The supertype
 * at depth (idepth - 1) is compared against @klass, given either as the IR
 * value "klass_ins", as an AOT class constant, or as an immediate pointer.
 * NOTE(review): sampled extract; lines between the numbered statements
 * (e.g. the idepth-check closing brace) are not shown.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* only check idepth when it could exceed the statically allocated supertable */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype test where @klass is known at compile time. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface type check against the vtable in "vtable_reg": first
 * verify the interface id is in range, then test the vtable's interface
 * bitmap.  With a true_target the check branches (isinst); otherwise it
 * throws InvalidCastException on failure (castclass).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* range-check the iid, then test the bitmap stored in MonoClass */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* branch (isinst) when a true_target exists, otherwise throw (castclass) */
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check of "klass_reg" against @klass, throwing
 * InvalidCastException on mismatch.  The expected class is supplied either
 * as the IR value "klass_inst", as an AOT class constant, or folded as an
 * immediate pointer.
 * NOTE(review): sampled extract; the opening "if (klass_inst)" line is not
 * shown between 1544 and 1547.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with @klass known at compile time. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare "klass_reg" against @klass and branch to "target" using the
 * caller-supplied conditional branch opcode "branch_op" (e.g. OP_PBEQ /
 * OP_PBNE_UN); under AOT the class pointer goes through a patch.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass-style check of the class in "klass_reg" against @klass,
 * throwing InvalidCastException on failure; "object_is_null" is the block
 * jumped to when the check is vacuous.  Arrays are handled specially: the
 * rank must match and the element class is checked recursively (passing
 * obj_reg == -1 to skip the vector check for arrays of arrays); enum-related
 * element classes get dedicated handling since enums and their underlying
 * type are cast-compatible.  Non-array classes are checked via the
 * supertypes table at depth (idepth - 1).
 * NOTE(review): sampled extract; lines between the numbered statements
 * (e.g. the "if (klass->rank)" guard before 1584) are not shown.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* special-case enum-compatible element classes (enum <-> underlying type casts) */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
/* a vector (SZARRAY) has a NULL bounds pointer; multi-dim arrays do not */
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
/* non-array case: verify @klass appears in the supertypes chain at its depth */
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with @klass known at compile time. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR which fills "size" bytes at destreg + offset with "val"
 * (currently only val == 0 is supported, see the assert).  Small aligned
 * fills become a single immediate store; larger fills store a value
 * register in descending power-of-two chunks, falling back to byte stores
 * when unaligned access is not available.
 * NOTE(review): sampled extract; the loop/brace scaffolding between the
 * numbered statements is not shown.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize val in a register sized for the target */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if necessary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* drain the remainder with progressively smaller stores */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR which copies "size" bytes from srcreg + soffset to
 * destreg + doffset, honouring "align".  Copies proceed in descending
 * power-of-two chunks (8/4/2/1 bytes) via load/store pairs; unaligned
 * buffers fall back to byte-wise copies when unaligned access is
 * unavailable.  Size is asserted < 10000 to bound code expansion.
 * NOTE(review): sampled extract; loop/brace scaffolding between the
 * numbered statements is not shown.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if necessary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* drain the remainder with 4-, 2- and 1-byte copies */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method's return type to the appropriate call IR opcode family
 * (void / int / long / float / vtype), further selected by whether the
 * call is indirect ("calli") or virtual ("virt").  Enum and generic-inst
 * types are unwrapped and the switch re-entered (the sampled "goto" lines
 * are not visible here).
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* resolve type/method var types under generic sharing first */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* enums use their underlying integral type */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* unwrap enums/custom-modifiers before dispatching on the element type */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
/* value types must match exactly by class */
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* the 'this' argument (when present) must be a managed pointer or object */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* enums check as their underlying integral type */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be statically dispatched).  Asserts on unknown opcodes.
 * NOTE(review): sampled extract; most of the switch body (the case labels
 * and return statements) is not visible between the lines below.
 */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode, i.e.
 * an indirect call through a [basereg + offset] slot (vtable/IMT dispatch).
 * Asserts on unknown opcodes.
 */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Emit IR which loads the IMT (interface method table) discriminator —
 * either the supplied "imt_arg" value or the call's method pointer — into
 * a register and attaches it to "call" as an out-of-band argument.  Three
 * strategies: LLVM backends record it in call->imt_arg_reg, architectures
 * with a dedicated MONO_ARCH_IMT_REG pin it there, and everything else
 * defers to mono_arch_emit_imt_argument ().
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2116 #ifdef MONO_ARCH_IMT_REG
/* non-LLVM path, architecture has a dedicated IMT register */
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* fallback: let the backend decide how to pass the IMT argument */
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2138 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch descriptor from mempool "mp" and fill in
 * its ip/type/target fields (field assignments partly elided by sampling).
 * The pool owns the memory; no explicit free is needed.
 */
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
2150 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for a call with signature "sig" and arguments
 * "args", choosing the opcode from the return type and the calli/virtual/
 * tail flags.  Struct returns either reuse cfg->vret_addr (backend
 * supports vret in a register/arg) or allocate a temp addressed through a
 * deferred OP_OUTARG_VTRETADDR.  On soft-float targets R4 arguments are
 * converted via icall *before* the call so the conversion cannot clobber
 * argument registers.  The actual arg marshalling is delegated to
 * mono_arch_emit_call / mono_llvm_emit_call.
 */
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* reserve outgoing stack space and flag the method as containing calls */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
2247 inline static MonoInst*
/*
 * mono_emit_calli:
 * Emit an indirect call through the code address in "addr" with the given
 * signature and arguments; returns the call instruction as a MonoInst*.
 */
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 * Attach the runtime-generic-context argument (already in "rgctx_reg") to
 * "call".  On architectures with a dedicated MONO_ARCH_RGCTX_REG the value
 * is pinned to that register; otherwise only the vreg is recorded in
 * call->rgctx_arg_reg for the backend to place.
 */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
2274 inline static MonoInst*
/*
 * mono_emit_rgctx_calli:
 * Like mono_emit_calli (), but additionally passes "rgctx_arg" as the
 * runtime-generic-context argument.  The rgctx value is copied into a fresh
 * vreg before the call is constructed so it survives argument marshalling.
 */
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a managed call to "method" with signature "sig" and arguments
 * "args"; "this" being non-NULL makes the call virtual, "imt_arg" carries
 * an explicit IMT discriminator for interface dispatch.  Handles, in order:
 * string-ctor signature rewriting, transparent-proxy/remoting wrappers
 * (including the generic-sharing variant which dispatches through an rgctx
 * slot), the delegate Invoke fast path through delegate->invoke_impl,
 * devirtualization of non-virtual and sealed methods, and finally true
 * virtual dispatch via the vtable or IMT slot (*CALL_MEMBASE).
 * NOTE(review): sampled extract; some connective lines (braces, else
 * branches) between the numbered statements are not shown.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* remoting: instance calls on MarshalByRef (or object) may hit a proxy */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
/* gshared remoting: fetch the invoke-with-check wrapper from the rgctx */
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* fast path: delegate Invoke dispatches through delegate->invoke_impl */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* true virtual dispatch: load the vtable (faulting on null this) */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* interface dispatch through the IMT slot below the vtable */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
/* no IMT: use the per-interface offset table before the vtable */
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 * Like mono_emit_method_call_full (), but additionally passes "vtable_arg"
 * as the runtime-generic-context argument; the value is copied into a fresh
 * vreg before the call is emitted so it survives argument marshalling.
 */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call with the method's own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native code address "func" with the given
 * signature and args (the fptr assignment line is elided by sampling).
 */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the registered JIT icall for address "func" and emit a native
 * call to its wrapper with the icall's own signature.
 */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* register ji in abs_patches so PATCH_INFO_ABS resolution can find it */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call in INS when the callee might return a
 * non-register-sized integer without initializing the upper bits
 * (pinvoke, or when compiling with LLVM).
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load opcode. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
/* Emit the widening op and propagate the stack type of the original result. */
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy helper from corlib, caching the lookup.
 * Aborts with g_error () if corlib is too old to contain it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap in *WB_BITMAP with one bit per pointer-sized slot of KLASS
 * (starting at OFFSET bytes) marking the slots that hold object references
 * and therefore need a write barrier. Recurses into reference-bearing
 * value-type fields.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
/* Only instance fields occupy slots in the object/value layout. */
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) MonoObject header. */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* References must be pointer-aligned for the per-slot bitmap to work. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or the register VALUE_REG)
 * through PTR. Prefers an inline card-table barrier when the architecture
 * supports it and we are not compiling AOT; otherwise falls back to either an
 * explicit card-table update sequence or a call to the GC's write barrier
 * method. Finally emits a dummy use to keep VALUE alive across the barrier.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2597 int card_table_shift_bits;
2598 gpointer card_table_mask;
2599 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2600 MonoInst *dummy_use;
2602 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2603 int nursery_shift_bits;
2604 size_t nursery_size;
2606 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Inline arch-specific card table barrier (JIT only, needs a known nursery). */
2608 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2611 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2612 wbarrier->sreg1 = ptr->dreg;
2614 wbarrier->sreg2 = value->dreg;
2616 wbarrier->sreg2 = value_reg;
2617 MONO_ADD_INS (cfg->cbb, wbarrier);
2621 int offset_reg = alloc_preg (cfg);
2622 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked into the card table's range. */
2625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2626 if (card_table_mask)
2627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2629 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2630 * IMM's larger than 32bits.
2632 if (cfg->compile_aot) {
2633 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2635 MONO_INST_NEW (cfg, ins, OP_PCONST);
2636 ins->inst_p0 = card_table;
2637 ins->dreg = card_reg;
2638 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card dirty: card_table [index] = 1. */
2641 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier method. */
2644 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2645 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive until after the barrier executes. */
2649 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2651 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2652 dummy_use->sreg1 = value_reg;
2653 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of a valuetype of type KLASS of SIZE
 * bytes from iargs [1] to iargs [0]. Small copies are unrolled inline with a
 * barrier after each reference slot; larger copies call the
 * mono_gc_wbarrier_value_copy_bitmap icall with a reference bitmap.
 * Returns FALSE (presumably; return values are on elided lines — TODO
 * confirm) when the copy cannot be handled here, e.g. under-aligned types.
 */
2659 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2661 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2662 unsigned need_wb = 0;
2667 /*types with references can't have alignment smaller than sizeof(void*) */
2668 if (align < SIZEOF_VOID_P)
2671 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2672 if (size > 32 * SIZEOF_VOID_P)
/* Compute which pointer-sized slots contain references. */
2675 create_write_barrier_bitmap (klass, &need_wb, 0);
2677 /* We don't unroll more than 5 stores to avoid code bloat. */
2678 if (size > 5 * SIZEOF_VOID_P) {
2679 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round the size up to a multiple of the pointer size. */
2680 size += (SIZEOF_VOID_P - 1);
2681 size &= ~(SIZEOF_VOID_P - 1);
2683 EMIT_NEW_ICONST (cfg, iargs [2], size);
2684 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2685 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Inline unrolled copy path. */
2689 destreg = iargs [0]->dreg;
2690 srcreg = iargs [1]->dreg;
2693 dest_ptr_reg = alloc_preg (cfg);
2694 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
2697 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2699 while (size >= SIZEOF_VOID_P) {
2700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier emitted per slot; the bitmap check for this slot is on an elided line. */
2704 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2706 offset += SIZEOF_VOID_P;
2707 size -= SIZEOF_VOID_P;
2710 /*tmp += sizeof (void*)*/
2711 if (size >= SIZEOF_VOID_P) {
2712 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2713 MONO_ADD_INS (cfg->cbb, iargs [0]);
2717 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with progressively narrower moves. */
2719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2727 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2743 * Emit code to copy a valuetype of type @klass whose address is stored in
2744 * @src->dreg to memory whose address is stored at @dest->dreg.
2747 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2749 MonoInst *iargs [4];
2752 MonoMethod *memcpy_method;
2756 * This check breaks with spilled vars... need to handle it during verification anyway.
2757 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (native) layout size over the managed one. */
2761 n = mono_class_native_size (klass, &align);
2763 n = mono_class_value_size (klass, &align);
2765 /* if native is true there should be no references in the struct */
2766 if (cfg->gen_write_barriers && klass->has_references && !native) {
2767 /* Avoid barriers when storing to the stack */
2768 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2769 (dest->opcode == OP_LDADDR))) {
2770 int context_used = 0;
2775 if (cfg->generic_sharing_context)
2776 context_used = mono_class_check_context_used (klass);
2778 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2779 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2781 } else if (context_used) {
/* Shared generic code: load the klass argument from the rgctx. */
2782 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2784 if (cfg->compile_aot) {
2785 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2787 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT: the GC descriptor must exist before mono_value_copy runs. */
2788 mono_class_compute_gc_descriptor (klass);
2792 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline small copies, otherwise call managed memcpy. */
2797 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2798 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2799 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2803 EMIT_NEW_ICONST (cfg, iargs [2], n);
2805 memcpy_method = get_memcpy_method ();
2806 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset helper from corlib, caching the lookup.
 * Aborts with g_error () if corlib is too old to contain it.
 */
2811 get_memset_method (void)
2813 static MonoMethod *memset_method = NULL;
2814 if (!memset_method) {
2815 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2817 g_error ("Old corlib found. Install a new one");
2819 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg. Small types are zeroed inline; larger ones call the managed
 * memset helper.
 */
2823 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2825 MonoInst *iargs [3];
2828 MonoMethod *memset_method;
2830 /* FIXME: Optimize this for the case when dest is an LDADDR */
2832 mono_class_init (klass);
2833 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheap enough to zero inline. */
2835 if (n <= sizeof (gpointer) * 5) {
2836 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2839 memset_method = get_memset_method ();
/* memset (dest, 0, n) */
2841 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2842 EMIT_NEW_ICONST (cfg, iargs [2], n);
2843 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on how
 * the context is shared, this is either the method's MRGCTX (from the vtable
 * var), the class vtable (for static/valuetype methods), or the vtable
 * loaded from 'this'. Only valid under generic sharing.
 */
2848 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2850 MonoInst *this = NULL;
2852 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types receive the context via 'this'. */
2854 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2855 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2856 !method->klass->valuetype)
2857 EMIT_NEW_ARGLOAD (cfg, this, 0);
2859 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Method-level generic context: load the MRGCTX from its variable. */
2860 MonoInst *mrgctx_loc, *mrgctx_var;
2863 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2865 mrgctx_loc = mono_get_vtable_var (cfg);
2866 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2869 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* No 'this' available: the context lives in the vtable variable. */
2870 MonoInst *vtable_loc, *vtable_var;
2874 vtable_loc = mono_get_vtable_var (cfg);
2875 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2877 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX: dereference to get the vtable. */
2878 MonoInst *mrgctx_var = vtable_var;
2881 vtable_reg = alloc_preg (cfg);
2882 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2883 vtable_var->type = STACK_PTR;
/* Default: load the vtable from the 'this' object. */
2889 int vtable_reg, res_reg;
2891 vtable_reg = alloc_preg (cfg);
2892 res_reg = alloc_preg (cfg);
2893 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor for the lazy-fetch
 * trampoline: which method's context to use, whether it is an MRGCTX, the
 * wrapped patch (PATCH_TYPE/PATCH_DATA), and the slot kind INFO_TYPE.
 */
static MonoJumpInfoRgctxEntry *
2899 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2901 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2902 res->method = method;
2903 res->in_mrgctx = in_mrgctx;
2904 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2905 res->data->type = patch_type;
2906 res->data->data.target = patch_data;
2907 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline to resolve ENTRY using RGCTX. */
static inline MonoInst*
2913 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2915 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info (e.g. KLASS or VTABLE) of KLASS
 * from the runtime generic context of the current method.
 */
2919 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2920 MonoClass *klass, int rgctx_type)
2922 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2923 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2925 return emit_rgctx_fetch (cfg, rgctx, entry);
2929 * emit_get_rgctx_method:
2931 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2932 * normal constants, else emit a load from the rgctx.
2935 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2936 MonoMethod *cmethod, int rgctx_type)
2938 if (!context_used) {
/* Non-shared code: the method is known statically, emit a constant. */
2941 switch (rgctx_type) {
2942 case MONO_RGCTX_INFO_METHOD:
2943 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2945 case MONO_RGCTX_INFO_METHOD_RGCTX:
2946 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2949 g_assert_not_reached ();
/* Shared code: resolve through the rgctx lazy-fetch trampoline. */
2952 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2953 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2955 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method.
 */
2960 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2961 MonoClassField *field, int rgctx_type)
2963 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2964 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2966 return emit_rgctx_fetch (cfg, rgctx, entry);
2970 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing its
 * vtable (from the rgctx under generic sharing, otherwise as a constant).
 */
2973 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2975 MonoInst *vtable_arg;
2977 int context_used = 0;
2979 if (cfg->generic_sharing_context)
2980 context_used = mono_class_check_context_used (klass);
/* Shared code: fetch the vtable through the rgctx. */
2983 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2984 klass, MONO_RGCTX_INFO_VTABLE);
2986 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2990 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature. */
2993 if (COMPILE_LLVM (cfg))
2994 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2996 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2997 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated arch register when one exists. */
2998 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2999 cfg->uses_vtable_reg = TRUE;
3006 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison is done against the
 * class (shared/AOT) or directly against the vtable (normal JIT).
 */
3009 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3011 int vtable_reg = alloc_preg (cfg);
3012 int context_used = 0;
3014 if (cfg->generic_sharing_context)
3015 context_used = mono_class_check_context_used (array_class);
/* Load obj->vtable, faulting on a null OBJ. */
3017 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3019 if (cfg->opt & MONO_OPT_SHARED) {
/* Domain-shared code cannot bake in a vtable pointer; compare classes. */
3020 int class_reg = alloc_preg (cfg);
3021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3022 if (cfg->compile_aot) {
3023 int klass_reg = alloc_preg (cfg);
3024 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3025 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3029 } else if (context_used) {
/* Shared generic code: fetch the expected vtable through the rgctx. */
3030 MonoInst *vtable_ins;
3032 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3033 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3035 if (cfg->compile_aot) {
3039 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3041 vt_reg = alloc_preg (cfg);
3042 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3043 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3046 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3052 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (from OBJ_REG's
 * vtable) and the target KLASS into the thread's MonoJitTlsData so a failed
 * cast can produce a detailed error message.
 */
3056 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3058 if (mini_get_debug_options ()->better_cast_details) {
3059 int to_klass_reg = alloc_preg (cfg);
3060 int vtable_reg = alloc_preg (cfg);
3061 int klass_reg = alloc_preg (cfg);
3062 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3065 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3069 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass; class_cast_to = klass */
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3074 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details TLS state recorded by save_cast_details () once
 * the cast has succeeded.
 */
3080 reset_cast_details (MonoCompile *cfg)
3082 /* Reset the variables holding the cast details */
3083 if (mini_get_debug_options ()->better_cast_details) {
3084 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3086 MONO_ADD_INS (cfg->cbb, tls_get);
3087 /* It is enough to reset the from field */
3088 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3093 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3094 * generic code is generated.
3097 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3099 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3102 MonoInst *rgctx, *addr;
3104 /* FIXME: What if the class is shared? We might not
3105 have to get the address of the method from the
/* Shared code: resolve Nullable<T>.Unbox's address through the rgctx
 * and call it indirectly with the rgctx argument. */
3107 addr = emit_get_rgctx_method (cfg, context_used, method,
3108 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3110 rgctx = emit_get_rgctx (cfg, method, context_used);
3112 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: call Nullable<T>.Unbox directly. */
3114 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of the object in sp [0] to a valuetype of type KLASS:
 * check the object's vtable rank/element class against KLASS and return an
 * address just past the MonoObject header. Throws InvalidCastException on
 * type mismatch.
 */
3119 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3123 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3124 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3125 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3126 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3128 obj_reg = sp [0]->dreg;
/* Load the vtable, faulting on null, and the array rank stored in it. */
3129 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3130 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3132 /* FIXME: generics */
3133 g_assert (klass->rank == 0);
/* An array object can never unbox to a valuetype. */
3136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3137 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx. */
3143 MonoInst *element_class;
3145 /* This assertion is from the unboxcast insn */
3146 g_assert (klass->rank == 0);
3148 element_class = emit_get_rgctx_klass (cfg, context_used,
3149 klass->element_class, MONO_RGCTX_INFO_KLASS);
3151 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3152 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3154 save_cast_details (cfg, klass->element_class, obj_reg);
3155 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3156 reset_cast_details (cfg);
/* Result: pointer to the boxed data, i.e. obj + sizeof (MonoObject). */
3159 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3160 MONO_ADD_INS (cfg->cbb, add);
3161 add->type = STACK_MP;
3168 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an object of type KLASS. Picks between several
 * allocation strategies: generic-sharing rgctx lookups, the domain-shared
 * mono_object_new path, a corlib-specialized AOT helper, the GC's managed
 * allocator, or mono_class_get_allocation_ftn ().
 */
3171 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3173 MonoInst *iargs [2];
/* Shared generic code path (context_used != 0; the branch itself is on an
 * elided line — TODO confirm). */
3179 MonoInst *iargs [2];
3182 FIXME: we cannot get managed_alloc here because we can't get
3183 the class's vtable (because it's not a closed class)
3185 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3186 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3189 if (cfg->opt & MONO_OPT_SHARED)
3190 rgctx_info = MONO_RGCTX_INFO_KLASS;
3192 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3193 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3195 if (cfg->opt & MONO_OPT_SHARED) {
/* mono_object_new (domain, klass) */
3196 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3198 alloc_ftn = mono_object_new;
/* mono_object_new_specific (vtable) */
3201 alloc_ftn = mono_object_new_specific;
3204 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
3207 if (cfg->opt & MONO_OPT_SHARED) {
3208 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3209 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3211 alloc_ftn = mono_object_new;
3212 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3213 /* This happens often in argument checking code, eg. throw new FooException... */
3214 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3215 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3216 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3218 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3219 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3223 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3224 cfg->exception_ptr = klass;
3228 #ifndef MONO_CROSS_COMPILE
3229 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's inline managed allocator when available. */
3232 if (managed_alloc) {
3233 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3234 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3236 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3238 guint32 lw = vtable->klass->instance_size;
3239 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3240 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3241 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3244 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3248 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3252 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the box of VAL to an object of type KLASS: Nullable<T> goes through
 * Nullable<T>.Box, everything else allocates an object and stores VAL past
 * the MonoObject header.
 */
3255 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3257 MonoInst *alloc, *ins;
3259 if (mono_class_is_nullable (klass)) {
3260 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3263 /* FIXME: What if the class is shared? We might not
3264 have to get the method address from the RGCTX. */
/* Shared code: indirect call through the rgctx-resolved method address. */
3265 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3266 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3267 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3269 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3271 return mono_emit_method_call (cfg, method, &val, NULL);
/* Allocate the box object, then copy the value into its payload. */
3275 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3279 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3284 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading "TRUE ||" makes this macro always evaluate to TRUE, so
 * every castclass/isinst currently takes the "complex" icall path; the rest
 * of the expression is the intended fast-path condition kept for when the
 * FIXME above is resolved. */
3285 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3288 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the CIL 'castclass' check of SRC to KLASS: null passes through,
 * otherwise compare the object's type and throw InvalidCastException on
 * mismatch. Complex cases (see is_complex_isinst) go through the
 * mono_object_castclass icall.
 */
3291 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3293 MonoBasicBlock *is_null_bb;
3294 int obj_reg = src->dreg;
3295 int vtable_reg = alloc_preg (cfg);
3296 MonoInst *klass_inst = NULL;
/* Shared generic code: the target klass comes from the rgctx. */
3301 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3302 klass, MONO_RGCTX_INFO_KLASS);
3304 if (is_complex_isinst (klass)) {
3305 /* Complex case, handle by an icall */
3311 args [1] = klass_inst;
3313 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3315 /* Simple case, handled by the code below */
/* castclass of null always succeeds. */
3319 NEW_BBLOCK (cfg, is_null_bb);
3321 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3324 save_cast_details (cfg, klass, obj_reg);
3326 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3328 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3330 int klass_reg = alloc_preg (cfg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array classes only need an exact-class comparison. */
3334 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3335 /* the remoting code is broken, access the class for now */
3336 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3337 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3339 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3340 cfg->exception_ptr = klass;
3343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3348 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via mini_emit_castclass_inst. */
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3351 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3355 MONO_START_BB (cfg, is_null_bb);
3357 reset_cast_details (cfg);
3363 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the CIL 'isinst' test of SRC against KLASS. The result register
 * holds the object on success and NULL on failure. Complex cases go through
 * the mono_object_isinst icall; simple cases are open-coded with basic
 * blocks (false_bb = not an instance, is_null_bb = keep the object).
 */
3366 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3369 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3370 int obj_reg = src->dreg;
3371 int vtable_reg = alloc_preg (cfg);
3372 int res_reg = alloc_preg (cfg);
3373 MonoInst *klass_inst = NULL;
3376 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3378 if (is_complex_isinst (klass)) {
3381 /* Complex case, handle by an icall */
3387 args [1] = klass_inst;
3389 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3391 /* Simple case, the code below can handle it */
3395 NEW_BBLOCK (cfg, is_null_bb);
3396 NEW_BBLOCK (cfg, false_bb);
3397 NEW_BBLOCK (cfg, end_bb);
3399 /* Do the assignment at the beginning, so the other assignment can be if converted */
3400 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3401 ins->type = STACK_OBJ;
/* isinst of null yields null without any type test. */
3404 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3405 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3410 g_assert (!context_used);
3411 /* the is_null_bb target simply copies the input register to the output */
3412 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3414 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then test the element (cast) class. */
3417 int rank_reg = alloc_preg (cfg);
3418 int eclass_reg = alloc_preg (cfg);
3420 g_assert (!context_used);
3421 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3424 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum[] whose underlying parent matches. */
3426 if (klass->cast_class == mono_defaults.object_class) {
3427 int parent_reg = alloc_preg (cfg);
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3429 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3430 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3432 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3433 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3434 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3435 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3436 } else if (klass->cast_class == mono_defaults.enum_class) {
3437 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3439 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3440 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3442 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3443 /* Check that the object is a vector too */
3444 int bounds_reg = alloc_preg (cfg);
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3450 /* the is_null_bb target simply copies the input register to the output */
3451 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3453 } else if (mono_class_is_nullable (klass)) {
3454 g_assert (!context_used);
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3456 /* the is_null_bb target simply copies the input register to the output */
3457 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3459 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3460 g_assert (!context_used);
3461 /* the remoting code is broken, access the class for now */
3462 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3463 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3465 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3466 cfg->exception_ptr = klass;
3469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3477 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3478 /* the is_null_bb target simply copies the input register to the output */
3479 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result is NULL. */
3484 MONO_START_BB (cfg, false_bb);
3486 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3487 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3489 MONO_START_BB (cfg, is_null_bb);
3491 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the CEE_MONO_CISINST opcode: a three-way isinst used by the
 * remoting/transparent-proxy machinery (result values documented below).
 */
3497 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3499 /* This opcode takes as input an object reference and a class, and returns:
3500 0) if the object is an instance of the class,
3501 1) if the object is not instance of the class,
3502 2) if the object is a proxy whose type cannot be determined */
3505 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3506 int obj_reg = src->dreg;
3507 int dreg = alloc_ireg (cfg);
3509 int klass_reg = alloc_preg (cfg);
3511 NEW_BBLOCK (cfg, true_bb);
3512 NEW_BBLOCK (cfg, false_bb);
3513 NEW_BBLOCK (cfg, false2_bb);
3514 NEW_BBLOCK (cfg, end_bb);
3515 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is never an instance: result 1. */
3517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3520 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface target: try the interface cast first; on failure, check
 * whether the object is a transparent proxy with custom type info. */
3521 NEW_BBLOCK (cfg, interface_fail_bb);
3523 tmp_reg = alloc_preg (cfg);
3524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3525 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3526 MONO_START_BB (cfg, interface_fail_bb);
3527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3529 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy: custom_type_info != 0 means the type cannot be determined (2). */
3531 tmp_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3534 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface target. */
3536 tmp_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3540 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy object: test against the remote class's proxy_class instead. */
3541 tmp_reg = alloc_preg (cfg);
3542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3545 tmp_reg = alloc_preg (cfg);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3550 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
/* Plain object: ordinary isinst test. */
3553 MONO_START_BB (cfg, no_proxy_bb);
3555 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result materialization: 1 = not instance, 2 = undecidable proxy, 0 = instance. */
3558 MONO_START_BB (cfg, false_bb);
3560 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3563 MONO_START_BB (cfg, false2_bb);
3565 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3568 MONO_START_BB (cfg, true_bb);
3570 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3572 MONO_START_BB (cfg, end_bb);
3575 MONO_INST_NEW (cfg, ins, OP_ICONST);
3577 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass (CCASTCLASS) operation.
 * NOTE(review): excerpt is missing some original lines (return-type line,
 * braces, `ins' declaration) — do not compile as-is.
 */
3583 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3585 /* This opcode takes as input an object reference and a class, and returns:
3586 0) if the object is an instance of the class,
3587 1) if the object is a proxy whose type cannot be determined
3588 an InvalidCastException exception is thrown otherwhise*/
3591 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3592 int obj_reg = src->dreg;
3593 int dreg = alloc_ireg (cfg);
3594 int tmp_reg = alloc_preg (cfg);
3595 int klass_reg = alloc_preg (cfg);
3597 NEW_BBLOCK (cfg, end_bb);
3598 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a detailed InvalidCastException message. */
3603 save_cast_details (cfg, klass, obj_reg);
3605 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3606 NEW_BBLOCK (cfg, interface_fail_bb);
3608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3609 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3610 MONO_START_BB (cfg, interface_fail_bb);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy may still pass;
 * anything else throws InvalidCastException inside the class check. */
3613 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3615 tmp_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3618 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("type cannot be determined"). */
3620 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3621 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3624 NEW_BBLOCK (cfg, no_proxy_bb);
3626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3628 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: use the remote class' proxy_class for the cast test. */
3630 tmp_reg = alloc_preg (cfg);
3631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3634 tmp_reg = alloc_preg (cfg);
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3637 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3639 NEW_BBLOCK (cfg, fail_1_bb);
3641 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3643 MONO_START_BB (cfg, fail_1_bb);
3645 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3648 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: normal castclass (throws on failure). */
3650 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3653 MONO_START_BB (cfg, ok_result_bb);
3655 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3657 MONO_START_BB (cfg, end_bb);
3660 MONO_INST_NEW (cfg, ins, OP_ICONST);
3662 ins->type = STACK_I4;
3668 * Returns NULL and set the cfg exception on error.
3670 static G_GNUC_UNUSED MonoInst*
3671 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3675 gpointer *trampoline;
3676 MonoInst *obj, *method_ins, *tramp_ins;
3680 obj = handle_alloc (cfg, klass, FALSE, 0);
3684 /* Inline the contents of mono_delegate_ctor */
3686 /* Set target field */
3687 /* Optimize away setting of NULL target */
3688 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3690 if (cfg->gen_write_barriers) {
3691 dreg = alloc_preg (cfg);
3692 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3693 emit_write_barrier (cfg, ptr, target, 0);
3697 /* Set method field */
3698 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3700 if (cfg->gen_write_barriers) {
3701 dreg = alloc_preg (cfg);
3702 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3703 emit_write_barrier (cfg, ptr, method_ins, 0);
3706 * To avoid looking up the compiled code belonging to the target method
3707 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3708 * store it, and we fill it after the method has been compiled.
3710 if (!cfg->compile_aot && !method->dynamic) {
3711 MonoInst *code_slot_ins;
3714 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3716 domain = mono_domain_get ();
3717 mono_domain_lock (domain);
3718 if (!domain_jit_info (domain)->method_code_hash)
3719 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3720 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3722 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3723 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3725 mono_domain_unlock (domain);
3727 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3732 /* Set invoke_impl field */
3733 if (cfg->compile_aot) {
3734 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3736 trampoline = mono_create_delegate_trampoline (klass);
3737 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3741 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the mono_array_new_va icall for a NEWOBJ on a
 * multi-dimensional array constructor of the given rank; `sp' holds the
 * dimension arguments, `ip' the IL location.
 * NOTE(review): excerpt is missing the return-type line and braces.
 */
3747 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3749 MonoJitICallInfo *info;
3751 /* Need to register the icall so it gets an icall wrapper */
3752 info = mono_get_array_new_va_icall (rank);
3754 cfg->flags |= MONO_CFG_HAS_VARARGS;
3756 /* mono_array_new_va () needs a vararg calling convention */
3757 cfg->disable_llvm = TRUE;
3759 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3760 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 * basic block, exactly once per method (guarded by got_var_allocated), and
 * add a dummy use in the exit block so the variable stays live for the
 * whole method.
 * NOTE(review): excerpt is missing the return-type line, braces and an early
 * `return' between lines 3768 and 3771.
 */
3764 mono_emit_load_got_addr (MonoCompile *cfg)
3766 MonoInst *getaddr, *dummy_use;
3768 if (!cfg->got_var || cfg->got_var_allocated)
3771 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3772 getaddr->dreg = cfg->got_var->dreg;
3774 /* Add it to the start of the first bblock */
/* Prepend manually when the block already has code, since MONO_ADD_INS
 * appends. */
3775 if (cfg->bb_entry->code) {
3776 getaddr->next = cfg->bb_entry->code;
3777 cfg->bb_entry->code = getaddr;
3780 MONO_ADD_INS (cfg->bb_entry, getaddr);
3782 cfg->got_var_allocated = TRUE;
3785 * Add a dummy use to keep the got_var alive, since real uses might
3786 * only be generated by the back ends.
3787 * Add it to end_bblock, so the variable's lifetime covers the whole
3789 * It would be better to make the usage of the got var explicit in all
3790 * cases when the backend needs it (i.e. calls, throw etc.), so this
3791 * wouldn't be needed.
3793 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3794 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, read once from MONO_INLINELIMIT (else INLINE_LENGTH_LIMIT). */
3797 static int inline_limit;
3798 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether `method' is a candidate for inlining into the method
 * currently being compiled: rejects generic sharing, deep inline nesting,
 * noinline/synchronized/marshalbyref methods, bodies over the size limit,
 * classes whose cctor would have to run inside the inlined code, methods
 * with declarative security (CAS), and (under soft-float) R4 signatures.
 * NOTE(review): excerpt is missing the return-type line, braces, the
 * `vtable'/`i' declarations and the TRUE/FALSE return statements.
 */
3801 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3803 MonoMethodHeaderSummary header;
3805 #ifdef MONO_ARCH_SOFT_FLOAT
3806 MonoMethodSignature *sig = mono_method_signature (method);
3810 if (cfg->generic_sharing_context)
3813 if (cfg->inline_depth > 10)
3816 #ifdef MONO_ARCH_HAVE_LMF_OPS
3817 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3818 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): `signature' is not declared in the visible lines (only `sig'
 * under MONO_ARCH_SOFT_FLOAT) — presumably declared on a missing line; verify. */
3819 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3824 if (!mono_method_get_header_summary (method, &header))
3827 /*runtime, icall and pinvoke are checked by summary call*/
3828 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3829 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3830 (method->klass->marshalbyref) ||
3834 /* also consider num_locals? */
3835 /* Do the size check early to avoid creating vtables */
3836 if (!inline_limit_inited) {
3837 if (getenv ("MONO_INLINELIMIT"))
3838 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3840 inline_limit = INLINE_LENGTH_LIMIT;
3841 inline_limit_inited = TRUE;
3843 if (header.code_size >= inline_limit)
3847 * if we can initialize the class of the method right away, we do,
3848 * otherwise we don't allow inlining if the class needs initialization,
3849 * since it would mean inserting a call to mono_runtime_class_init()
3850 * inside the inlined code
3852 if (!(cfg->opt & MONO_OPT_SHARED)) {
3853 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3854 if (cfg->run_cctors && method->klass->has_cctor) {
3855 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3856 if (!method->klass->runtime_info)
3857 /* No vtable created yet */
3859 vtable = mono_class_vtable (cfg->domain, method->klass);
3862 /* This makes so that inline cannot trigger */
3863 /* .cctors: too many apps depend on them */
3864 /* running with a specific order... */
3865 if (! vtable->initialized)
3867 mono_runtime_class_init (vtable);
3869 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3870 if (!method->klass->runtime_info)
3871 /* No vtable created yet */
3873 vtable = mono_class_vtable (cfg->domain, method->klass);
3876 if (!vtable->initialized)
3881 * If we're compiling for shared code
3882 * the cctor will need to be run at aot method load time, for example,
3883 * or at the end of the compilation of the inlining method.
3885 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3890 * CAS - do not inline methods with declarative security
3891 * Note: this has to be before any possible return TRUE;
3893 if (mono_method_has_declsec (method))
3896 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: inlining methods with R4 in the signature is not supported. */
3898 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3900 for (i = 0; i < sig->param_count; ++i)
3901 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access in `method' must trigger the class
 * constructor of `vtable''s class: not if already initialized (JIT only),
 * not for beforefieldinit classes, not if no cctor is needed, and not for
 * an instance method of the same class (cctor already ran at call time).
 * NOTE(review): excerpt is missing the return-type line, braces and the
 * actual TRUE/FALSE return statements.
 */
3909 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3911 if (vtable->initialized && !cfg->compile_aot)
3914 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3917 if (!mono_class_needs_cctor_run (vtable->klass, method))
3920 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3921 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element:
 * &arr->vector [index * element_size], with an optional bounds check.
 * On 64-bit targets the 32-bit index is sign-extended first; on x86/amd64
 * power-of-two element sizes use a single LEA.
 * NOTE(review): excerpt is missing the return-type line, braces, the `ins'
 * and `size' declarations, `#else'/`#endif' lines and the return statements.
 */
3928 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3932 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3934 mono_class_init (klass);
3935 size = mono_class_array_element_size (klass);
3937 mult_reg = alloc_preg (cfg);
3938 array_reg = arr->dreg;
3939 index_reg = index->dreg;
3941 #if SIZEOF_REGISTER == 8
3942 /* The array reg is 64 bits but the index reg is only 32 */
3943 if (COMPILE_LLVM (cfg)) {
3945 index2_reg = index_reg;
3947 index2_reg = alloc_preg (cfg);
3948 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3951 if (index->type == STACK_I8) {
3952 index2_reg = alloc_preg (cfg);
3953 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3955 index2_reg = index_reg;
/* Bounds check against MonoArray.max_length (emitted only when bcheck —
 * guard condition falls on a missing line of this excerpt). */
3960 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3962 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3963 if (size == 1 || size == 2 || size == 4 || size == 8) {
3964 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3966 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3967 ins->type = STACK_PTR;
/* Generic path: addr = array + index * size + offsetof (MonoArray, vector). */
3973 add_reg = alloc_preg (cfg);
3975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3976 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3977 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3978 ins->type = STACK_PTR;
3979 MONO_ADD_INS (cfg->cbb, ins);
3984 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a two-dimensional array,
 * including per-dimension lower-bound adjustment and range checks against
 * the MonoArrayBounds pairs, throwing IndexOutOfRangeException on failure.
 * Final address: arr + ((realidx1 * len2) + realidx2) * size + vector offset.
 * Only compiled when the target has real mul/div (depends on OP_LMUL/PMUL).
 * NOTE(review): excerpt is missing the return-type line, braces and the
 * `ins'/`size'/`index1'/`index2' declarations.
 */
3986 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3988 int bounds_reg = alloc_preg (cfg);
3989 int add_reg = alloc_preg (cfg);
3990 int mult_reg = alloc_preg (cfg);
3991 int mult2_reg = alloc_preg (cfg);
3992 int low1_reg = alloc_preg (cfg);
3993 int low2_reg = alloc_preg (cfg);
3994 int high1_reg = alloc_preg (cfg);
3995 int high2_reg = alloc_preg (cfg);
3996 int realidx1_reg = alloc_preg (cfg);
3997 int realidx2_reg = alloc_preg (cfg);
3998 int sum_reg = alloc_preg (cfg);
4003 mono_class_init (klass);
4004 size = mono_class_array_element_size (klass);
4006 index1 = index_ins1->dreg;
4007 index2 = index_ins2->dreg;
4009 /* range checking */
4010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4011 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, must be < length (unsigned
 * compare also catches negative results). */
4013 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4014 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4015 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4016 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4017 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4018 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4019 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds entry lives sizeof (MonoArrayBounds) past the first. */
4021 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4022 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4023 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4024 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4025 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4026 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4027 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4029 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4030 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4032 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4033 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4035 ins->type = STACK_MP;
4037 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch the Address/Get/Set array intrinsic for `cmethod': rank 1 uses
 * the direct address computation, rank 2 the specialized 2D path (when the
 * target has real mul and MONO_OPT_INTRINS is on), and higher ranks call the
 * generated array-address marshalling wrapper.
 * NOTE(review): excerpt is missing the return-type line, braces, the
 * rank-1 guard and `#endif'/return lines.
 */
4044 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4048 MonoMethod *addr_method;
/* A setter carries the value as its last parameter, so it does not count
 * toward the rank. */
4051 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4054 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4056 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4057 /* emit_ldelema_2 depends on OP_LMUL */
4058 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4059 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4063 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4064 addr_method = mono_marshal_get_array_address (rank, element_size);
4065 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint ("method" is unused). */
4070 static MonoBreakPolicy
4071 always_insert_breakpoint (MonoMethod *method)
4073 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4076 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4079 * mono_set_break_policy:
4080 * policy_callback: the new callback function
4082 * Allow embedders to decide wherther to actually obey breakpoint instructions
4083 * (both break IL instructions and Debugger.Break () method calls), for example
4084 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4085 * untrusted or semi-trusted code.
4087 * @policy_callback will be called every time a break point instruction needs to
4088 * be inserted with the method argument being the method that calls Debugger.Break()
4089 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4090 * if it wants the breakpoint to not be effective in the given method.
4091 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Passing NULL restores the default always-break policy. */
4094 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4096 if (policy_callback)
4097 break_policy_func = policy_callback;
4099 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy for `method'; TRUE means emit the
 * breakpoint, and MONO_BREAK_POLICY_ON_DBG defers to whether the Mono
 * debugger is attached.  An out-of-range callback result is warned about.
 * NOTE(review): the name carries a historical typo ("brekpoint"); kept
 * as-is since callers in this file use it.  Return-type line, braces and
 * the TRUE/FALSE returns fall on lines missing from this excerpt.
 */
4103 should_insert_brekpoint (MonoMethod *method) {
4104 switch (break_policy_func (method)) {
4105 case MONO_BREAK_POLICY_ALWAYS:
4107 case MONO_BREAK_POLICY_NEVER:
4109 case MONO_BREAK_POLICY_ON_DBG:
4110 return mono_debug_using_mono_debugger ();
4112 g_warning ("Incorrect value returned from break policy callback");
4117 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the generic array element get/set icall as an element-address
 * computation plus a typed load/store; args are (array, index, value-ref)
 * and the element class comes from the third signature parameter.
 * NOTE(review): excerpt is missing the return-type line, braces, the
 * is_set/else structure lines and the final return.
 */
4119 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4121 MonoInst *addr, *store, *load;
4122 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4124 /* the bounds check is already done by the callers */
4125 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the element; Get: copy the element into *args [2]. */
4127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4128 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4131 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR; currently only
 * SIMD constructors are handled (when MONO_OPT_SIMD is enabled).  Returns
 * the emitted instruction or NULL when no intrinsic applies.
 * NOTE(review): excerpt is missing the return-type line, braces, `#endif'
 * and the final return.
 */
4137 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4139 MonoInst *ins = NULL;
4140 #ifdef MONO_ARCH_SIMD_INTRINSICS
4141 if (cfg->opt & MONO_OPT_SIMD) {
4142 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Replace calls to well-known corlib methods with inline IR (intrinsics):
 * String get_Chars/get_Length/InternalSetChar, Object GetType/hash/.ctor,
 * Array length/bounds accessors, RuntimeHelpers.get_OffsetToStringData,
 * Thread SpinWait_nop/MemoryBarrier, Monitor Enter/Exit fast paths,
 * Interlocked Read/Increment/Decrement/Add/Exchange/CompareExchange,
 * Debugger.Break, Environment.get_IsRunningOnWindows, Math and SIMD
 * intrinsics, finally deferring to the architecture backend.
 * NOTE(review): this excerpt is lossy — return-type line, braces, several
 * `#else'/`#endif' lines and `return' statements fall on missing lines;
 * treat structure accordingly and do not compile as-is.
 */
4152 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4154 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4156 static MonoClass *runtime_helpers_class = NULL;
4157 if (! runtime_helpers_class)
4158 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4159 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4161 if (cmethod->klass == mono_defaults.string_class) {
4162 if (strcmp (cmethod->name, "get_Chars") == 0) {
4163 int dreg = alloc_ireg (cfg);
4164 int index_reg = alloc_preg (cfg);
4165 int mult_reg = alloc_preg (cfg);
4166 int add_reg = alloc_preg (cfg);
4168 #if SIZEOF_REGISTER == 8
4169 /* The array reg is 64 bits but the index reg is only 32 */
4170 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4172 index_reg = args [1]->dreg;
4174 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4176 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4177 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4178 add_reg = ins->dreg;
4179 /* Avoid a warning */
4181 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes, hence the shift by 1. */
4184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4185 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4186 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4187 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4189 type_from_op (ins, NULL, NULL);
4191 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4192 int dreg = alloc_ireg (cfg);
4193 /* Decompose later to allow more optimizations */
4194 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4195 ins->type = STACK_I4;
4196 ins->flags |= MONO_INST_FAULT;
4197 cfg->cbb->has_array_access = TRUE;
4198 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4201 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4202 int mult_reg = alloc_preg (cfg);
4203 int add_reg = alloc_preg (cfg);
4205 /* The corlib functions check for oob already. */
4206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4207 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4208 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4209 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4212 } else if (cmethod->klass == mono_defaults.object_class) {
4214 if (strcmp (cmethod->name, "GetType") == 0) {
4215 int dreg = alloc_preg (cfg);
4216 int vt_reg = alloc_preg (cfg);
4217 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4218 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4219 type_from_op (ins, NULL, NULL);
4222 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid with a non-moving GC. */
4223 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4224 int dreg = alloc_ireg (cfg);
4225 int t1 = alloc_ireg (cfg);
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4228 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4229 ins->type = STACK_I4;
4233 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty -> a NOP. */
4234 MONO_INST_NEW (cfg, ins, OP_NOP);
4235 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4239 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4240 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4241 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4243 #ifndef MONO_BIG_ARRAYS
4245 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4248 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4249 int dreg = alloc_ireg (cfg);
4250 int bounds_reg = alloc_ireg (cfg);
4251 MonoBasicBlock *end_bb, *szarray_bb;
4252 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4254 NEW_BBLOCK (cfg, end_bb);
4255 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer identifies a szarray (vector). */
4257 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4258 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4259 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4260 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4261 /* Non-szarray case */
4263 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4264 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4266 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4267 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4268 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4269 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0. */
4272 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4273 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4275 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4276 MONO_START_BB (cfg, end_bb);
4278 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4279 ins->type = STACK_I4;
/* Only getter names past this point. */
4285 if (cmethod->name [0] != 'g')
4288 if (strcmp (cmethod->name, "get_Rank") == 0) {
4289 int dreg = alloc_ireg (cfg);
4290 int vtable_reg = alloc_preg (cfg);
4291 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4292 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4293 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4294 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4295 type_from_op (ins, NULL, NULL);
4298 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4299 int dreg = alloc_ireg (cfg);
4301 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4302 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4303 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4308 } else if (cmethod->klass == runtime_helpers_class) {
4310 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4311 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4315 } else if (cmethod->klass == mono_defaults.thread_class) {
4316 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4317 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4318 MONO_ADD_INS (cfg->cbb, ins);
4320 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4321 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4322 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4325 } else if (cmethod->klass == mono_defaults.monitor_class) {
4326 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4327 /* The trampolines don't work under SGEN */
4328 gboolean is_moving_gc = mono_gc_is_moving ();
4330 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4333 if (COMPILE_LLVM (cfg)) {
4335 * Pass the argument normally, the LLVM backend will handle the
4336 * calling convention problems.
4338 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4340 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4341 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4342 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4343 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4346 return (MonoInst*)call;
4347 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4350 if (COMPILE_LLVM (cfg)) {
4351 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4353 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4354 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4355 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4356 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4359 return (MonoInst*)call;
4361 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4362 MonoMethod *fast_method = NULL;
4364 /* Avoid infinite recursion */
4365 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4366 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4367 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4370 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4371 strcmp (cmethod->name, "Exit") == 0)
4372 fast_method = mono_monitor_get_fast_path (cmethod);
4376 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4378 } else if (cmethod->klass->image == mono_defaults.corlib &&
4379 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4380 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4383 #if SIZEOF_REGISTER == 8
4384 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4385 /* 64 bit reads are already atomic */
4386 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4387 ins->dreg = mono_alloc_preg (cfg);
4388 ins->inst_basereg = args [0]->dreg;
4389 ins->inst_offset = 0;
4390 MONO_ADD_INS (cfg->cbb, ins);
4394 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic add of +1/-1. */
4395 if (strcmp (cmethod->name, "Increment") == 0) {
4396 MonoInst *ins_iconst;
4399 if (fsig->params [0]->type == MONO_TYPE_I4)
4400 opcode = OP_ATOMIC_ADD_NEW_I4;
4401 #if SIZEOF_REGISTER == 8
4402 else if (fsig->params [0]->type == MONO_TYPE_I8)
4403 opcode = OP_ATOMIC_ADD_NEW_I8;
4406 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4407 ins_iconst->inst_c0 = 1;
4408 ins_iconst->dreg = mono_alloc_ireg (cfg);
4409 MONO_ADD_INS (cfg->cbb, ins_iconst);
4411 MONO_INST_NEW (cfg, ins, opcode);
4412 ins->dreg = mono_alloc_ireg (cfg);
4413 ins->inst_basereg = args [0]->dreg;
4414 ins->inst_offset = 0;
4415 ins->sreg2 = ins_iconst->dreg;
4416 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4417 MONO_ADD_INS (cfg->cbb, ins);
4419 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4420 MonoInst *ins_iconst;
4423 if (fsig->params [0]->type == MONO_TYPE_I4)
4424 opcode = OP_ATOMIC_ADD_NEW_I4;
4425 #if SIZEOF_REGISTER == 8
4426 else if (fsig->params [0]->type == MONO_TYPE_I8)
4427 opcode = OP_ATOMIC_ADD_NEW_I8;
4430 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4431 ins_iconst->inst_c0 = -1;
4432 ins_iconst->dreg = mono_alloc_ireg (cfg);
4433 MONO_ADD_INS (cfg->cbb, ins_iconst);
4435 MONO_INST_NEW (cfg, ins, opcode);
4436 ins->dreg = mono_alloc_ireg (cfg);
4437 ins->inst_basereg = args [0]->dreg;
4438 ins->inst_offset = 0;
4439 ins->sreg2 = ins_iconst->dreg;
4440 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4441 MONO_ADD_INS (cfg->cbb, ins);
4443 } else if (strcmp (cmethod->name, "Add") == 0) {
4446 if (fsig->params [0]->type == MONO_TYPE_I4)
4447 opcode = OP_ATOMIC_ADD_NEW_I4;
4448 #if SIZEOF_REGISTER == 8
4449 else if (fsig->params [0]->type == MONO_TYPE_I8)
4450 opcode = OP_ATOMIC_ADD_NEW_I8;
4454 MONO_INST_NEW (cfg, ins, opcode);
4455 ins->dreg = mono_alloc_ireg (cfg);
4456 ins->inst_basereg = args [0]->dreg;
4457 ins->inst_offset = 0;
4458 ins->sreg2 = args [1]->dreg;
4459 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4460 MONO_ADD_INS (cfg->cbb, ins);
4463 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4465 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4466 if (strcmp (cmethod->name, "Exchange") == 0) {
4468 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4470 if (fsig->params [0]->type == MONO_TYPE_I4)
4471 opcode = OP_ATOMIC_EXCHANGE_I4;
4472 #if SIZEOF_REGISTER == 8
4473 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4474 (fsig->params [0]->type == MONO_TYPE_I))
4475 opcode = OP_ATOMIC_EXCHANGE_I8;
4477 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4478 opcode = OP_ATOMIC_EXCHANGE_I4;
4483 MONO_INST_NEW (cfg, ins, opcode);
4484 ins->dreg = mono_alloc_ireg (cfg);
4485 ins->inst_basereg = args [0]->dreg;
4486 ins->inst_offset = 0;
4487 ins->sreg2 = args [1]->dreg;
4488 MONO_ADD_INS (cfg->cbb, ins);
4490 switch (fsig->params [0]->type) {
4492 ins->type = STACK_I4;
4496 ins->type = STACK_I8;
4498 case MONO_TYPE_OBJECT:
4499 ins->type = STACK_OBJ;
4502 g_assert_not_reached ();
/* Exchanging a reference needs a write barrier under a moving GC. */
4505 if (cfg->gen_write_barriers && is_ref)
4506 emit_write_barrier (cfg, args [0], args [1], -1);
4508 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4510 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4511 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4513 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4514 if (fsig->params [1]->type == MONO_TYPE_I4)
4516 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4517 size = sizeof (gpointer);
4518 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4521 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4522 ins->dreg = alloc_ireg (cfg);
4523 ins->sreg1 = args [0]->dreg;
4524 ins->sreg2 = args [1]->dreg;
4525 ins->sreg3 = args [2]->dreg;
4526 ins->type = STACK_I4;
4527 MONO_ADD_INS (cfg->cbb, ins);
4528 } else if (size == 8) {
4529 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4530 ins->dreg = alloc_ireg (cfg);
4531 ins->sreg1 = args [0]->dreg;
4532 ins->sreg2 = args [1]->dreg;
4533 ins->sreg3 = args [2]->dreg;
4534 ins->type = STACK_I8;
4535 MONO_ADD_INS (cfg->cbb, ins);
4537 /* g_assert_not_reached (); */
4539 if (cfg->gen_write_barriers && is_ref)
4540 emit_write_barrier (cfg, args [0], args [1], -1);
4542 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Miscellaneous corlib methods ---- */
4546 } else if (cmethod->klass->image == mono_defaults.corlib) {
4547 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4548 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4549 if (should_insert_brekpoint (cfg->method))
4550 MONO_INST_NEW (cfg, ins, OP_BREAK);
4552 MONO_INST_NEW (cfg, ins, OP_NOP);
4553 MONO_ADD_INS (cfg->cbb, ins);
4556 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4557 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Constant-folded per target: 1 on Windows builds, 0 otherwise
 * (the #ifdef lines fall on missing lines of this excerpt). */
4559 EMIT_NEW_ICONST (cfg, ins, 1);
4561 EMIT_NEW_ICONST (cfg, ins, 0);
4565 } else if (cmethod->klass == mono_defaults.math_class) {
4567 * There is general branches code for Min/Max, but it does not work for
4569 * http://everything2.com/?node_id=1051618
4573 #ifdef MONO_ARCH_SIMD_INTRINSICS
4574 if (cfg->opt & MONO_OPT_SIMD) {
4575 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No intrinsic matched: let the architecture backend have a try. */
4581 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4585 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to replace a call to METHOD with a call to a
 * different (usually faster) implementation.  The only redirection
 * visible in this chunk is String.InternalAllocateStr: when allocation
 * profiling is off and the method is not compiled in shared mode, the
 * call is rewritten into a direct call to the GC's managed string
 * allocator, passing the String vtable and the requested length.
 * NOTE(review): lines are elided in this view (e.g. the NULL
 * fall-through returns are not visible) - do not assume this is the
 * complete body.
 */
4588 inline static MonoInst*
4589 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4590 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4592 if (method->klass == mono_defaults.string_class) {
4593 /* managed string allocation support */
4594 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4595 MonoInst *iargs [2];
4596 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4597 MonoMethod *managed_alloc = NULL;
4599 g_assert (vtable); /* Should not fail since it's System.String */
4600 #ifndef MONO_CROSS_COMPILE
4601 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* args [0] is the requested string length argument of InternalAllocateStr. */
4605 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4606 iargs [1] = args [0];
4607 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create inline-argument variables (stored into cfg->args) and emit
 * stores of the stack values in SP into them, so an inlined callee can
 * access its arguments as ordinary argument vars.  For the implicit
 * 'this' argument (i == 0 when sig->hasthis) the type is derived from
 * the stack entry instead of the signature.
 */
4614 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4616 MonoInst *store, *temp;
4619 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4620 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4623 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4624 * would be different than the MonoInst's used to represent arguments, and
4625 * the ldelema implementation can't deal with that.
4626 * Solution: When ldelema is used on an inline argument, create a var for
4627 * it, emit ldelema on that var, and emit the saving code below in
4628 * inline_method () if needed.
4630 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4631 cfg->args [i] = temp;
4632 /* This uses cfg->args [i] which is set by the preceding line */
4633 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4634 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining by callee/caller full name via the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables. */
4639 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4640 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4642 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD may be inlined according to
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT: only methods whose full name
 * starts with the limit string pass.  The env lookup is cached in a
 * function-static on first use.
 */
4644 check_inline_called_method_name_limit (MonoMethod *called_method)
4647 static char *limit = NULL;
4649 if (limit == NULL) {
4650 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4652 if (limit_string != NULL)
4653 limit = limit_string;
/* No env var set: an empty limit makes the prefix check below a no-op. */
4655 limit = (char *) "";
4658 if (limit [0] != '\0') {
4659 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: the callee name must start with the limit string. */
4661 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4662 g_free (called_method_name);
4664 //return (strncmp_result <= 0);
4665 return (strncmp_result == 0);
4672 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit () for the caller
 * side: only callers whose full name starts with the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT prefix are allowed to inline.
 */
4674 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4677 static char *limit = NULL;
4679 if (limit == NULL) {
4680 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4681 if (limit_string != NULL) {
4682 limit = limit_string;
/* No env var set: an empty limit disables the restriction. */
4684 limit = (char *) "";
4688 if (limit [0] != '\0') {
4689 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4691 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4692 g_free (caller_method_name);
4694 //return (strncmp_result <= 0);
4695 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the
 * per-method translation state in cfg, recursively invokes
 * mono_method_to_ir () on the callee between freshly created start/end
 * bblocks, restores the state, and then either links the generated
 * blocks into the graph (when the measured cost is low enough or
 * INLINE_ALLWAYS is set) or discards them.
 * NOTE(review): lines are elided in this view; the declarations of
 * 'costs'/'i', several braces, and the return statements are not
 * visible here.
 */
4703 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4704 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4706 MonoInst *ins, *rvar = NULL;
4707 MonoMethodHeader *cheader;
4708 MonoBasicBlock *ebblock, *sbblock;
4710 MonoMethod *prev_inlined_method;
4711 MonoInst **prev_locals, **prev_args;
4712 MonoType **prev_arg_types;
4713 guint prev_real_offset;
4714 GHashTable *prev_cbb_hash;
4715 MonoBasicBlock **prev_cil_offset_to_bb;
4716 MonoBasicBlock *prev_cbb;
4717 unsigned char* prev_cil_start;
4718 guint32 prev_cil_offset_to_bb_len;
4719 MonoMethod *prev_current_method;
4720 MonoGenericContext *prev_generic_context;
4721 gboolean ret_var_set, prev_ret_var_set;
4723 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based inlining filters (debugging aids, see above). */
4725 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4726 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4729 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4730 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4734 if (cfg->verbose_level > 2)
4735 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4737 if (!cmethod->inline_info) {
4738 mono_jit_stats.inlineable_methods++;
4739 cmethod->inline_info = 1;
4742 /* allocate local variables */
4743 cheader = mono_method_get_header (cmethod);
4745 if (cheader == NULL || mono_loader_get_last_error ()) {
4747 mono_metadata_free_mh (cheader);
4748 mono_loader_clear_error ();
4752 /*Must verify before creating locals as it can cause the JIT to assert.*/
4753 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4754 mono_metadata_free_mh (cheader);
4758 /* allocate space to store the return value */
4759 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4760 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
4764 prev_locals = cfg->locals;
4765 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4766 for (i = 0; i < cheader->num_locals; ++i)
4767 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4769 /* allocate start and end blocks */
4770 /* This is needed so if the inline is aborted, we can clean up */
4771 NEW_BBLOCK (cfg, sbblock);
4772 sbblock->real_offset = real_offset;
4774 NEW_BBLOCK (cfg, ebblock);
4775 ebblock->block_num = cfg->num_bblocks++;
4776 ebblock->real_offset = real_offset;
/* Save the per-method translation state before recursing. */
4778 prev_args = cfg->args;
4779 prev_arg_types = cfg->arg_types;
4780 prev_inlined_method = cfg->inlined_method;
4781 cfg->inlined_method = cmethod;
4782 cfg->ret_var_set = FALSE;
4783 cfg->inline_depth ++;
4784 prev_real_offset = cfg->real_offset;
4785 prev_cbb_hash = cfg->cbb_hash;
4786 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4787 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4788 prev_cil_start = cfg->cil_start;
4789 prev_cbb = cfg->cbb;
4790 prev_current_method = cfg->current_method;
4791 prev_generic_context = cfg->generic_context;
4792 prev_ret_var_set = cfg->ret_var_set;
/* Translate the callee's IL into IR between sbblock and ebblock. */
4794 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4796 ret_var_set = cfg->ret_var_set;
/* Restore the caller's translation state. */
4798 cfg->inlined_method = prev_inlined_method;
4799 cfg->real_offset = prev_real_offset;
4800 cfg->cbb_hash = prev_cbb_hash;
4801 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4802 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4803 cfg->cil_start = prev_cil_start;
4804 cfg->locals = prev_locals;
4805 cfg->args = prev_args;
4806 cfg->arg_types = prev_arg_types;
4807 cfg->current_method = prev_current_method;
4808 cfg->generic_context = prev_generic_context;
4809 cfg->ret_var_set = prev_ret_var_set;
4810 cfg->inline_depth --;
/* Accept the inline when it was cheap enough or explicitly forced. */
4812 if ((costs >= 0 && costs < 60) || inline_allways) {
4813 if (cfg->verbose_level > 2)
4814 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4816 mono_jit_stats.inlined_methods++;
4818 /* always add some code to avoid block split failures */
4819 MONO_INST_NEW (cfg, ins, OP_NOP);
4820 MONO_ADD_INS (prev_cbb, ins);
4822 prev_cbb->next_bb = sbblock;
4823 link_bblock (cfg, prev_cbb, sbblock);
4826 * Get rid of the begin and end bblocks if possible to aid local
4829 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4831 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4832 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4834 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4835 MonoBasicBlock *prev = ebblock->in_bb [0];
4836 mono_merge_basic_blocks (cfg, prev, ebblock);
4838 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4839 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4840 cfg->cbb = prev_cbb;
4848 * If the inlined method contains only a throw, then the ret var is not
4849 * set, so set it to a dummy value.
4852 static double r8_0 = 0.0;
4854 switch (rvar->type) {
4856 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4859 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4864 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4867 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4868 ins->type = STACK_R8;
4869 ins->inst_p0 = (void*)&r8_0;
4870 ins->dreg = rvar->dreg;
4871 MONO_ADD_INS (cfg->cbb, ins);
4874 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4877 g_assert_not_reached ();
/* Push the (possibly dummy) return value for the caller. */
4881 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4884 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: drop any newly emitted bblocks and clear the failure. */
4887 if (cfg->verbose_level > 2)
4888 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4889 cfg->exception_type = MONO_EXCEPTION_NONE;
4890 mono_loader_clear_error ();
4892 /* This gets rid of the newly added bblocks */
4893 cfg->cbb = prev_cbb;
4895 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4900 * Some of these comments may well be out-of-date.
4901 * Design decisions: we do a single pass over the IL code (and we do bblock
4902 * splitting/merging in the few cases when it's required: a back jump to an IL
4903 * address that was not already seen as bblock starting point).
4904 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4905 * Complex operations are decomposed in simpler ones right away. We need to let the
4906 * arch-specific code peek and poke inside this process somehow (except when the
4907 * optimizations can take advantage of the full semantic info of coarse opcodes).
4908 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4909 * MonoInst->opcode initially is the IL opcode or some simplification of that
4910 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4911 * opcode with value bigger than OP_LAST.
4912 * At this point the IR can be handed over to an interpreter, a dumb code generator
4913 * or to the optimizing code generator that will translate it to SSA form.
4915 * Profiling directed optimizations.
4916 * We may compile by default with few or no optimizations and instrument the code
4917 * or the user may indicate what methods to optimize the most either in a config file
4918 * or through repeated runs where the compiler applies offline the optimizations to
4919 * each method and then decides if it was worth it.
/* Verification / bounds-check helpers used throughout mono_method_to_ir ().
 * Each bails out through UNVERIFIED (or, for CHECK_TYPELOAD, load_error)
 * on failure. */
4922 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4923 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4924 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4925 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4926 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4927 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4928 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4929 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4931 /* offset from br.s -> br like opcodes */
4932 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the CIL address IP still belongs to bblock BB, i.e.
 * no other basic block has been registered as starting at that offset.
 */
4935 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4937 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4939 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): create a basic block at
 * every branch target and at the instruction following each branch, so
 * the main translation loop can rely on block boundaries existing.
 * The bblock that ends in a CEE_THROW is marked out-of-line so block
 * layout can move it off the hot path.
 * NOTE(review): several operand-size cases and the error path are
 * elided in this view.
 */
4943 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4945 unsigned char *ip = start;
4946 unsigned char *target;
4949 MonoBasicBlock *bblock;
4950 const MonoOpcode *opcode;
4953 cli_addr = ip - start;
4954 i = mono_opcode_value ((const guint8 **)&ip, end);
4957 opcode = &mono_opcodes [i];
/* Advance past the operand; register bblocks at branch targets and at
 * each fall-through address. */
4958 switch (opcode->argument) {
4959 case MonoInlineNone:
4962 case MonoInlineString:
4963 case MonoInlineType:
4964 case MonoInlineField:
4965 case MonoInlineMethod:
4968 case MonoShortInlineR:
4975 case MonoShortInlineVar:
4976 case MonoShortInlineI:
4979 case MonoShortInlineBrTarget:
/* 1-byte signed displacement relative to the next instruction. */
4980 target = start + cli_addr + 2 + (signed char)ip [1];
4981 GET_BBLOCK (cfg, bblock, target);
4984 GET_BBLOCK (cfg, bblock, ip);
4986 case MonoInlineBrTarget:
/* 4-byte signed displacement relative to the next instruction. */
4987 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4988 GET_BBLOCK (cfg, bblock, target);
4991 GET_BBLOCK (cfg, bblock, ip);
4993 case MonoInlineSwitch: {
4994 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the jump table. */
4997 cli_addr += 5 + 4 * n;
4998 target = start + cli_addr;
4999 GET_BBLOCK (cfg, bblock, target);
5001 for (j = 0; j < n; ++j) {
5002 target = start + cli_addr + (gint32)read32 (ip);
5003 GET_BBLOCK (cfg, bblock, target);
5013 g_assert_not_reached ();
5016 if (i == CEE_THROW) {
5017 unsigned char *bb_start = ip - 1;
5019 /* Find the start of the bblock containing the throw */
5021 while ((bb_start >= start) && !bblock) {
5022 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5026 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in M's image/context.  For wrapper
 * methods the token indexes the wrapper's own data table instead of
 * image metadata.  Open constructed types are allowed (hence the name).
 */
5035 static inline MonoMethod *
5036 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5040 if (m->wrapper_type != MONO_WRAPPER_NONE)
5041 return mono_method_get_wrapper_data (m, token);
5043 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but without generic sharing a
 * method on an open constructed type is rejected.
 * NOTE(review): the rejection branch body is elided in this view -
 * confirm what it does upstream before relying on it.
 */
5048 static inline MonoMethod *
5049 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5051 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5053 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass - from the wrapper data table for
 * wrapper methods, from image metadata otherwise - and initialize the
 * class before returning it.
 */
5059 static inline MonoClass*
5060 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5064 if (method->wrapper_type != MONO_WRAPPER_NONE)
5065 klass = mono_method_get_wrapper_data (method, token);
5067 klass = mono_class_get_full (method->klass->image, token, context);
5069 mono_class_init (klass);
5074 * Returns TRUE if the JIT should abort inlining because "callee"
5075 * is influenced by security attributes.
5078 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate the linkdemand when inlining (cfg->method != caller)
 * into a callee that actually carries declarative security. */
5082 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5086 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5087 if (result == MONO_JIT_SECURITY_OK)
5090 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5091 /* Generate code to throw a SecurityException before the actual call/link */
5092 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5095 NEW_ICONST (cfg, args [0], 4);
5096 NEW_METHODCONST (cfg, args [1], caller);
5097 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5098 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5099 /* don't hide previous results */
5100 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5101 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException method used by
 * generated code to raise security exceptions; the lookup is cached in
 * a function-static.
 */
5109 throw_exception (void)
5111 static MonoMethod *method = NULL;
5114 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5115 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point.
 */
5122 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5124 MonoMethod *thrower = throw_exception ();
5127 EMIT_NEW_PCONST (cfg, args [0], ex);
5128 mono_emit_method_call (cfg, thrower, args, NULL);
5132 * Return the original method if a wrapper is specified. We can only access
5133 * the custom attributes from the original method.
5136 get_original_method (MonoMethod *method)
/* Non-wrappers are returned unchanged (the 'return' itself is elided
 * in this view). */
5138 if (method->wrapper_type == MONO_WRAPPER_NONE)
5141 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5142 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5145 /* in other cases we need to find the original method */
5146 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if the (unwrapped) CALLER is not allowed to
 * access FIELD, emit code that throws the returned security exception.
 */
5150 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5151 MonoBasicBlock *bblock, unsigned char *ip)
5153 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5154 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5156 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if the (unwrapped) CALLER is not allowed to
 * call CALLEE, emit code that throws the returned security exception.
 */
5160 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5161 MonoBasicBlock *bblock, unsigned char *ip)
5163 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5164 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5166 emit_throw_exception (cfg, ex);
5170 * Check that the IL instructions at ip are the array initialization
5171 * sequence and return the pointer to the data and the size.
5174 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Pattern matched (immediately after a newarr):
 *   dup; ldtoken <fieldref>; call RuntimeHelpers::InitializeArray
 * On success the RVA-mapped field blob (or, for AOT, the raw RVA so it
 * can be resolved at load time) is returned. */
5177 * newarr[System.Int32]
5179 * ldtoken field valuetype ...
5180 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5182 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5183 guint32 token = read32 (ip + 7);
5184 guint32 field_token = read32 (ip + 2);
5185 guint32 field_index = field_token & 0xffffff;
5187 const char *data_ptr;
5189 MonoMethod *cmethod;
5190 MonoClass *dummy_class;
5191 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5197 *out_field_token = field_token;
5199 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real RuntimeHelpers.InitializeArray from corlib qualifies. */
5202 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element size depends on the array element type; multi-byte elements
 * are only accepted on little-endian targets (no byte swapping here). */
5204 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5205 case MONO_TYPE_BOOLEAN:
5209 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5210 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5211 case MONO_TYPE_CHAR:
5221 return NULL; /* stupid ARM FP swapped format */
/* Sanity-check the computed blob size against the field's type size. */
5231 if (size > mono_type_size (field->type, &dummy_align))
5234 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5235 if (!method->klass->image->dynamic) {
5236 field_index = read32 (ip + 2) & 0xffffff;
5237 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5238 data_ptr = mono_image_rva_map (method->klass->image, rva);
5239 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5240 /* for aot code we do the lookup on load */
5241 if (aot && data_ptr)
5242 return GUINT_TO_POINTER (rva);
5244 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5246 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, quoting the offending IL
 * instruction at IP (or noting that the method body is empty).
 */
5254 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5256 char *method_fname = mono_method_full_name (method, TRUE);
5258 MonoMethodHeader *header = mono_method_get_header (method);
5260 if (header->code_size == 0)
5261 method_code = g_strdup ("method body is empty.");
5263 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5264 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5265 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5266 g_free (method_fname);
5267 g_free (method_code);
/* The header is freed later, together with the rest of the compile. */
5268 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Make CFG fail compilation with a caller-supplied exception object.
 * cfg->exception_ptr is registered as a GC root since it now holds a
 * managed object.
 */
5272 set_exception_object (MonoCompile *cfg, MonoException *exception)
5274 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5275 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5276 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active.
 */
5280 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5284 if (cfg->generic_sharing_context)
5285 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5287 type = &klass->byval_arg;
5288 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N.  When the value was just produced
 * by an ICONST/I8CONST that is the last instruction of the current
 * bblock, retarget that instruction's dreg instead of emitting a
 * separate move.
 */
5292 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5295 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5296 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5297 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5298 /* Optimize reg-reg moves away */
5300 * Can't optimize other opcodes, since sp[0] might point to
5301 * the last ins of a decomposed opcode.
5303 sp [0]->dreg = (cfg)->locals [n]->dreg;
5305 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5310 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases, e.g. ldloca immediately followed by initobj, which can zero or
 * null the local directly without taking its address. */
5313 static inline unsigned char *
5314 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5323 local = read16 (ip + 2);
/* Peephole: ldloca <n>; initobj <type>, within the same bblock. */
5327 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5328 gboolean skip = FALSE;
5330 /* From the INITOBJ case */
5331 token = read32 (ip + 2);
5332 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5333 CHECK_TYPELOAD (klass);
/* Reference types (including shared-generic type vars resolved to
 * references) are set to NULL; value types are zeroed. */
5334 if (generic_class_is_reference_type (cfg, klass)) {
5335 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5336 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5337 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5338 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5339 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* Return whether CLASS is System.Exception or derives from it (walks
 * the parent chain). */
5352 is_exception_class (MonoClass *class)
5355 if (class == mono_defaults.exception_class)
5357 class = class->parent;
5363 * mono_method_to_ir:
5365 * Translate the .net IL into linear IR.
5368 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5369 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5370 guint inline_offset, gboolean is_virtual_call)
5373 MonoInst *ins, **sp, **stack_start;
5374 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5375 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5376 MonoMethod *cmethod, *method_definition;
5377 MonoInst **arg_array;
5378 MonoMethodHeader *header;
5380 guint32 token, ins_flag;
5382 MonoClass *constrained_call = NULL;
5383 unsigned char *ip, *end, *target, *err_pos;
5384 static double r8_0 = 0.0;
5385 MonoMethodSignature *sig;
5386 MonoGenericContext *generic_context = NULL;
5387 MonoGenericContainer *generic_container = NULL;
5388 MonoType **param_types;
5389 int i, n, start_new_bblock, dreg;
5390 int num_calls = 0, inline_costs = 0;
5391 int breakpoint_id = 0;
5393 MonoBoolean security, pinvoke;
5394 MonoSecurityManager* secman = NULL;
5395 MonoDeclSecurityActions actions;
5396 GSList *class_inits = NULL;
5397 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5399 gboolean init_locals, seq_points, skip_dead_blocks;
5401 /* serialization and xdomain stuff may need access to private fields and methods */
5402 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5403 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5404 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5405 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5406 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5407 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5409 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5411 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5412 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5413 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5414 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5416 image = method->klass->image;
5417 header = mono_method_get_header (method);
5419 MonoLoaderError *error;
5421 if ((error = mono_loader_get_last_error ())) {
5422 cfg->exception_type = error->exception_type;
5424 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5425 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5427 goto exception_exit;
5429 generic_container = mono_method_get_generic_container (method);
5430 sig = mono_method_signature (method);
5431 num_args = sig->hasthis + sig->param_count;
5432 ip = (unsigned char*)header->code;
5433 cfg->cil_start = ip;
5434 end = ip + header->code_size;
5435 mono_jit_stats.cil_code_size += header->code_size;
5436 init_locals = header->init_locals;
5438 seq_points = cfg->gen_seq_points && cfg->method == method;
5441 * Methods without init_locals set could cause asserts in various passes
5446 method_definition = method;
5447 while (method_definition->is_inflated) {
5448 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5449 method_definition = imethod->declaring;
5452 /* SkipVerification is not allowed if core-clr is enabled */
5453 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5455 dont_verify_stloc = TRUE;
5458 if (mono_debug_using_mono_debugger ())
5459 cfg->keep_cil_nops = TRUE;
5461 if (sig->is_inflated)
5462 generic_context = mono_method_get_context (method);
5463 else if (generic_container)
5464 generic_context = &generic_container->context;
5465 cfg->generic_context = generic_context;
5467 if (!cfg->generic_sharing_context)
5468 g_assert (!sig->has_type_parameters);
5470 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5471 g_assert (method->is_inflated);
5472 g_assert (mono_method_get_context (method)->method_inst);
5474 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5475 g_assert (sig->generic_param_count);
5477 if (cfg->method == method) {
5478 cfg->real_offset = 0;
5480 cfg->real_offset = inline_offset;
5483 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5484 cfg->cil_offset_to_bb_len = header->code_size;
5486 cfg->current_method = method;
5488 if (cfg->verbose_level > 2)
5489 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5491 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5493 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5494 for (n = 0; n < sig->param_count; ++n)
5495 param_types [n + sig->hasthis] = sig->params [n];
5496 cfg->arg_types = param_types;
5498 dont_inline = g_list_prepend (dont_inline, method);
5499 if (cfg->method == method) {
5501 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5502 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5505 NEW_BBLOCK (cfg, start_bblock);
5506 cfg->bb_entry = start_bblock;
5507 start_bblock->cil_code = NULL;
5508 start_bblock->cil_length = 0;
5511 NEW_BBLOCK (cfg, end_bblock);
5512 cfg->bb_exit = end_bblock;
5513 end_bblock->cil_code = NULL;
5514 end_bblock->cil_length = 0;
5515 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5516 g_assert (cfg->num_bblocks == 2);
5518 arg_array = cfg->args;
5520 if (header->num_clauses) {
5521 cfg->spvars = g_hash_table_new (NULL, NULL);
5522 cfg->exvars = g_hash_table_new (NULL, NULL);
5524 /* handle exception clauses */
5525 for (i = 0; i < header->num_clauses; ++i) {
5526 MonoBasicBlock *try_bb;
5527 MonoExceptionClause *clause = &header->clauses [i];
5528 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5529 try_bb->real_offset = clause->try_offset;
5530 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5531 tblock->real_offset = clause->handler_offset;
5532 tblock->flags |= BB_EXCEPTION_HANDLER;
5534 link_bblock (cfg, try_bb, tblock);
5536 if (*(ip + clause->handler_offset) == CEE_POP)
5537 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5539 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5540 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5541 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5542 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5543 MONO_ADD_INS (tblock, ins);
5545 /* todo: is a fault block unsafe to optimize? */
5546 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5547 tblock->flags |= BB_EXCEPTION_UNSAFE;
5551 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5553 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5555 /* catch and filter blocks get the exception object on the stack */
5556 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5557 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5558 MonoInst *dummy_use;
5560 /* mostly like handle_stack_args (), but just sets the input args */
5561 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5562 tblock->in_scount = 1;
5563 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5564 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5567 * Add a dummy use for the exvar so its liveness info will be
5571 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5574 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5575 tblock->flags |= BB_EXCEPTION_HANDLER;
5576 tblock->real_offset = clause->data.filter_offset;
5577 tblock->in_scount = 1;
5578 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5579 /* The filter block shares the exvar with the handler block */
5580 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5581 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5582 MONO_ADD_INS (tblock, ins);
5586 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5587 clause->data.catch_class &&
5588 cfg->generic_sharing_context &&
5589 mono_class_check_context_used (clause->data.catch_class)) {
5591 * In shared generic code with catch
5592 * clauses containing type variables
5593 * the exception handling code has to
5594 * be able to get to the rgctx.
5595 * Therefore we have to make sure that
5596 * the vtable/mrgctx argument (for
5597 * static or generic methods) or the
5598 * "this" argument (for non-static
5599 * methods) are live.
5601 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5602 mini_method_get_context (method)->method_inst ||
5603 method->klass->valuetype) {
5604 mono_get_vtable_var (cfg);
5606 MonoInst *dummy_use;
5608 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5613 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5614 cfg->cbb = start_bblock;
5615 cfg->args = arg_array;
5616 mono_save_args (cfg, sig, inline_args);
5619 /* FIRST CODE BLOCK */
5620 NEW_BBLOCK (cfg, bblock);
5621 bblock->cil_code = ip;
5625 ADD_BBLOCK (cfg, bblock);
5627 if (cfg->method == method) {
5628 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5629 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5630 MONO_INST_NEW (cfg, ins, OP_BREAK);
5631 MONO_ADD_INS (bblock, ins);
5635 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5636 secman = mono_security_manager_get_methods ();
5638 security = (secman && mono_method_has_declsec (method));
5639 /* at this point having security doesn't mean we have any code to generate */
5640 if (security && (cfg->method == method)) {
5641 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5642 * And we do not want to enter the next section (with allocation) if we
5643 * have nothing to generate */
5644 security = mono_declsec_get_demands (method, &actions);
5647 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5648 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5650 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5651 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5652 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5654 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5655 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5659 mono_custom_attrs_free (custom);
5662 custom = mono_custom_attrs_from_class (wrapped->klass);
5663 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5667 mono_custom_attrs_free (custom);
5670 /* not a P/Invoke after all */
5675 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5676 /* we use a separate basic block for the initialization code */
5677 NEW_BBLOCK (cfg, init_localsbb);
5678 cfg->bb_init = init_localsbb;
5679 init_localsbb->real_offset = cfg->real_offset;
5680 start_bblock->next_bb = init_localsbb;
5681 init_localsbb->next_bb = bblock;
5682 link_bblock (cfg, start_bblock, init_localsbb);
5683 link_bblock (cfg, init_localsbb, bblock);
5685 cfg->cbb = init_localsbb;
5687 start_bblock->next_bb = bblock;
5688 link_bblock (cfg, start_bblock, bblock);
5691 /* at this point we know, if security is TRUE, that some code needs to be generated */
5692 if (security && (cfg->method == method)) {
5695 mono_jit_stats.cas_demand_generation++;
5697 if (actions.demand.blob) {
5698 /* Add code for SecurityAction.Demand */
5699 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5700 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5701 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5702 mono_emit_method_call (cfg, secman->demand, args, NULL);
5704 if (actions.noncasdemand.blob) {
5705 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5706 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5707 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5708 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5709 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5710 mono_emit_method_call (cfg, secman->demand, args, NULL);
5712 if (actions.demandchoice.blob) {
5713 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5714 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5715 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5716 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5717 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5721 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5723 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5726 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5727 /* check if this is native code, e.g. an icall or a p/invoke */
5728 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5729 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5731 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5732 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5734 /* if this is a native call then it can only be JITted from platform code */
5735 if ((icall || pinvk) && method->klass && method->klass->image) {
5736 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5737 MonoException *ex = icall ? mono_get_exception_security () :
5738 mono_get_exception_method_access ();
5739 emit_throw_exception (cfg, ex);
5746 if (header->code_size == 0)
5749 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5754 if (cfg->method == method)
5755 mono_debug_init_method (cfg, bblock, breakpoint_id);
5757 for (n = 0; n < header->num_locals; ++n) {
5758 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5763 /* We force the vtable variable here for all shared methods
5764 for the possibility that they might show up in a stack
5765 trace where their exact instantiation is needed. */
5766 if (cfg->generic_sharing_context && method == cfg->method) {
5767 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5768 mini_method_get_context (method)->method_inst ||
5769 method->klass->valuetype) {
5770 mono_get_vtable_var (cfg);
5772 /* FIXME: Is there a better way to do this?
5773 We need the variable live for the duration
5774 of the whole method. */
5775 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5779 /* add a check for this != NULL to inlined methods */
5780 if (is_virtual_call) {
5783 NEW_ARGLOAD (cfg, arg_ins, 0);
5784 MONO_ADD_INS (cfg->cbb, arg_ins);
5785 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5788 skip_dead_blocks = !dont_verify;
5789 if (skip_dead_blocks) {
5790 original_bb = bb = mono_basic_block_split (method, &error);
5791 if (!mono_error_ok (&error)) {
5792 mono_error_cleanup (&error);
5798 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5799 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5802 start_new_bblock = 0;
5805 if (cfg->method == method)
5806 cfg->real_offset = ip - header->code;
5808 cfg->real_offset = inline_offset;
5813 if (start_new_bblock) {
5814 bblock->cil_length = ip - bblock->cil_code;
5815 if (start_new_bblock == 2) {
5816 g_assert (ip == tblock->cil_code);
5818 GET_BBLOCK (cfg, tblock, ip);
5820 bblock->next_bb = tblock;
5823 start_new_bblock = 0;
5824 for (i = 0; i < bblock->in_scount; ++i) {
5825 if (cfg->verbose_level > 3)
5826 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5827 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5831 g_slist_free (class_inits);
5834 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5835 link_bblock (cfg, bblock, tblock);
5836 if (sp != stack_start) {
5837 handle_stack_args (cfg, stack_start, sp - stack_start);
5839 CHECK_UNVERIFIABLE (cfg);
5841 bblock->next_bb = tblock;
5844 for (i = 0; i < bblock->in_scount; ++i) {
5845 if (cfg->verbose_level > 3)
5846 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5847 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5850 g_slist_free (class_inits);
5855 if (skip_dead_blocks) {
5856 int ip_offset = ip - header->code;
5858 if (ip_offset == bb->end)
5862 int op_size = mono_opcode_size (ip, end);
5863 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5865 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5867 if (ip_offset + op_size == bb->end) {
5868 MONO_INST_NEW (cfg, ins, OP_NOP);
5869 MONO_ADD_INS (bblock, ins);
5870 start_new_bblock = 1;
5878 * Sequence points are points where the debugger can place a breakpoint.
5879 * Currently, we generate these automatically at points where the IL
5882 if (seq_points && sp == stack_start) {
5883 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5884 MONO_ADD_INS (cfg->cbb, ins);
5887 bblock->real_offset = cfg->real_offset;
5889 if ((cfg->method == method) && cfg->coverage_info) {
5890 guint32 cil_offset = ip - header->code;
5891 cfg->coverage_info->data [cil_offset].cil_code = ip;
5893 /* TODO: Use an increment here */
5894 #if defined(TARGET_X86)
5895 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5896 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5898 MONO_ADD_INS (cfg->cbb, ins);
5900 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5901 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5905 if (cfg->verbose_level > 3)
5906 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5910 if (cfg->keep_cil_nops)
5911 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5913 MONO_INST_NEW (cfg, ins, OP_NOP);
5915 MONO_ADD_INS (bblock, ins);
5918 if (should_insert_brekpoint (cfg->method))
5919 MONO_INST_NEW (cfg, ins, OP_BREAK);
5921 MONO_INST_NEW (cfg, ins, OP_NOP);
5923 MONO_ADD_INS (bblock, ins);
5929 CHECK_STACK_OVF (1);
5930 n = (*ip)-CEE_LDARG_0;
5932 EMIT_NEW_ARGLOAD (cfg, ins, n);
5940 CHECK_STACK_OVF (1);
5941 n = (*ip)-CEE_LDLOC_0;
5943 EMIT_NEW_LOCLOAD (cfg, ins, n);
5952 n = (*ip)-CEE_STLOC_0;
5955 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5957 emit_stloc_ir (cfg, sp, header, n);
5964 CHECK_STACK_OVF (1);
5967 EMIT_NEW_ARGLOAD (cfg, ins, n);
5973 CHECK_STACK_OVF (1);
5976 NEW_ARGLOADA (cfg, ins, n);
5977 MONO_ADD_INS (cfg->cbb, ins);
5987 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5989 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5994 CHECK_STACK_OVF (1);
5997 EMIT_NEW_LOCLOAD (cfg, ins, n);
6001 case CEE_LDLOCA_S: {
6002 unsigned char *tmp_ip;
6004 CHECK_STACK_OVF (1);
6005 CHECK_LOCAL (ip [1]);
6007 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6013 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6022 CHECK_LOCAL (ip [1]);
6023 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6025 emit_stloc_ir (cfg, sp, header, ip [1]);
6030 CHECK_STACK_OVF (1);
6031 EMIT_NEW_PCONST (cfg, ins, NULL);
6032 ins->type = STACK_OBJ;
6037 CHECK_STACK_OVF (1);
6038 EMIT_NEW_ICONST (cfg, ins, -1);
6051 CHECK_STACK_OVF (1);
6052 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6058 CHECK_STACK_OVF (1);
6060 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6066 CHECK_STACK_OVF (1);
6067 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6073 CHECK_STACK_OVF (1);
6074 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6075 ins->type = STACK_I8;
6076 ins->dreg = alloc_dreg (cfg, STACK_I8);
6078 ins->inst_l = (gint64)read64 (ip);
6079 MONO_ADD_INS (bblock, ins);
6085 gboolean use_aotconst = FALSE;
6087 #ifdef TARGET_POWERPC
6088 /* FIXME: Clean this up */
6089 if (cfg->compile_aot)
6090 use_aotconst = TRUE;
6093 /* FIXME: we should really allocate this only late in the compilation process */
6094 f = mono_domain_alloc (cfg->domain, sizeof (float));
6096 CHECK_STACK_OVF (1);
6102 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6104 dreg = alloc_freg (cfg);
6105 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6106 ins->type = STACK_R8;
6108 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6109 ins->type = STACK_R8;
6110 ins->dreg = alloc_dreg (cfg, STACK_R8);
6112 MONO_ADD_INS (bblock, ins);
6122 gboolean use_aotconst = FALSE;
6124 #ifdef TARGET_POWERPC
6125 /* FIXME: Clean this up */
6126 if (cfg->compile_aot)
6127 use_aotconst = TRUE;
6130 /* FIXME: we should really allocate this only late in the compilation process */
6131 d = mono_domain_alloc (cfg->domain, sizeof (double));
6133 CHECK_STACK_OVF (1);
6139 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6141 dreg = alloc_freg (cfg);
6142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6143 ins->type = STACK_R8;
6145 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6146 ins->type = STACK_R8;
6147 ins->dreg = alloc_dreg (cfg, STACK_R8);
6149 MONO_ADD_INS (bblock, ins);
6158 MonoInst *temp, *store;
6160 CHECK_STACK_OVF (1);
6164 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6165 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6167 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6170 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6183 if (sp [0]->type == STACK_R8)
6184 /* we need to pop the value from the x86 FP stack */
6185 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6194 if (stack_start != sp)
6196 token = read32 (ip + 1);
6197 /* FIXME: check the signature matches */
6198 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6200 if (!cmethod || mono_loader_get_last_error ())
6203 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6204 GENERIC_SHARING_FAILURE (CEE_JMP);
6206 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6207 CHECK_CFG_EXCEPTION;
6209 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6211 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6214 /* Handle tail calls similarly to calls */
6215 n = fsig->param_count + fsig->hasthis;
6217 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6218 call->method = cmethod;
6219 call->tail_call = TRUE;
6220 call->signature = mono_method_signature (cmethod);
6221 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6222 call->inst.inst_p0 = cmethod;
6223 for (i = 0; i < n; ++i)
6224 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6226 mono_arch_emit_call (cfg, call);
6227 MONO_ADD_INS (bblock, (MonoInst*)call);
6230 for (i = 0; i < num_args; ++i)
6231 /* Prevent arguments from being optimized away */
6232 arg_array [i]->flags |= MONO_INST_VOLATILE;
6234 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6235 ins = (MonoInst*)call;
6236 ins->inst_p0 = cmethod;
6237 MONO_ADD_INS (bblock, ins);
6241 start_new_bblock = 1;
6246 case CEE_CALLVIRT: {
6247 MonoInst *addr = NULL;
6248 MonoMethodSignature *fsig = NULL;
6250 int virtual = *ip == CEE_CALLVIRT;
6251 int calli = *ip == CEE_CALLI;
6252 gboolean pass_imt_from_rgctx = FALSE;
6253 MonoInst *imt_arg = NULL;
6254 gboolean pass_vtable = FALSE;
6255 gboolean pass_mrgctx = FALSE;
6256 MonoInst *vtable_arg = NULL;
6257 gboolean check_this = FALSE;
6258 gboolean supported_tail_call = FALSE;
6261 token = read32 (ip + 1);
6268 if (method->wrapper_type != MONO_WRAPPER_NONE)
6269 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6271 fsig = mono_metadata_parse_signature (image, token);
6273 n = fsig->param_count + fsig->hasthis;
6275 if (method->dynamic && fsig->pinvoke) {
6279 * This is a call through a function pointer using a pinvoke
6280 * signature. Have to create a wrapper and call that instead.
6281 * FIXME: This is very slow, need to create a wrapper at JIT time
6282 * instead based on the signature.
6284 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6285 EMIT_NEW_PCONST (cfg, args [1], fsig);
6287 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6290 MonoMethod *cil_method;
6292 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6293 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6294 cil_method = cmethod;
6295 } else if (constrained_call) {
6296 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6298 * This is needed since get_method_constrained can't find
6299 * the method in klass representing a type var.
6300 * The type var is guaranteed to be a reference type in this
6303 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6304 cil_method = cmethod;
6305 g_assert (!cmethod->klass->valuetype);
6307 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6310 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6311 cil_method = cmethod;
6314 if (!cmethod || mono_loader_get_last_error ())
6316 if (!dont_verify && !cfg->skip_visibility) {
6317 MonoMethod *target_method = cil_method;
6318 if (method->is_inflated) {
6319 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6321 if (!mono_method_can_access_method (method_definition, target_method) &&
6322 !mono_method_can_access_method (method, cil_method))
6323 METHOD_ACCESS_FAILURE;
6326 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6327 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6329 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6330 /* MS.NET seems to silently convert this to a callvirt */
6335 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6336 * converts to a callvirt.
6338 * tests/bug-515884.il is an example of this behavior
6340 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6341 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6342 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6346 if (!cmethod->klass->inited)
6347 if (!mono_class_init (cmethod->klass))
6350 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6351 mini_class_is_system_array (cmethod->klass)) {
6352 array_rank = cmethod->klass->rank;
6353 fsig = mono_method_signature (cmethod);
6355 fsig = mono_method_signature (cmethod);
6360 if (fsig->pinvoke) {
6361 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6362 check_for_pending_exc, FALSE);
6363 fsig = mono_method_signature (wrapper);
6364 } else if (constrained_call) {
6365 fsig = mono_method_signature (cmethod);
6367 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6371 mono_save_token_info (cfg, image, token, cil_method);
6373 n = fsig->param_count + fsig->hasthis;
6375 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6376 if (check_linkdemand (cfg, method, cmethod))
6378 CHECK_CFG_EXCEPTION;
6381 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6382 g_assert_not_reached ();
6385 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6388 if (!cfg->generic_sharing_context && cmethod)
6389 g_assert (!mono_method_check_context_used (cmethod));
6393 //g_assert (!virtual || fsig->hasthis);
6397 if (constrained_call) {
6399 * We have the `constrained.' prefix opcode.
6401 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6403 * The type parameter is instantiated as a valuetype,
6404 * but that type doesn't override the method we're
6405 * calling, so we need to box `this'.
6407 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6408 ins->klass = constrained_call;
6409 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6410 CHECK_CFG_EXCEPTION;
6411 } else if (!constrained_call->valuetype) {
6412 int dreg = alloc_preg (cfg);
6415 * The type parameter is instantiated as a reference
6416 * type. We have a managed pointer on the stack, so
6417 * we need to dereference it here.
6419 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6420 ins->type = STACK_OBJ;
6422 } else if (cmethod->klass->valuetype)
6424 constrained_call = NULL;
6427 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6431 * If the callee is a shared method, then its static cctor
6432 * might not get called after the call was patched.
6434 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6435 emit_generic_class_init (cfg, cmethod->klass);
6436 CHECK_TYPELOAD (cmethod->klass);
6439 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6440 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6441 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6442 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6443 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6446 * Pass vtable iff target method might
6447 * be shared, which means that sharing
6448 * is enabled for its class and its
6449 * context is sharable (and it's not a
6452 if (sharing_enabled && context_sharable &&
6453 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6457 if (cmethod && mini_method_get_context (cmethod) &&
6458 mini_method_get_context (cmethod)->method_inst) {
6459 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6460 MonoGenericContext *context = mini_method_get_context (cmethod);
6461 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6463 g_assert (!pass_vtable);
6465 if (sharing_enabled && context_sharable)
6469 if (cfg->generic_sharing_context && cmethod) {
6470 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6472 context_used = mono_method_check_context_used (cmethod);
6474 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6475 /* Generic method interface
6476 calls are resolved via a
6477 helper function and don't
6479 if (!cmethod_context || !cmethod_context->method_inst)
6480 pass_imt_from_rgctx = TRUE;
6484 * If a shared method calls another
6485 * shared method then the caller must
6486 * have a generic sharing context
6487 * because the magic trampoline
6488 * requires it. FIXME: We shouldn't
6489 * have to force the vtable/mrgctx
6490 * variable here. Instead there
6491 * should be a flag in the cfg to
6492 * request a generic sharing context.
6495 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6496 mono_get_vtable_var (cfg);
6501 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6503 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6505 CHECK_TYPELOAD (cmethod->klass);
6506 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6511 g_assert (!vtable_arg);
6513 if (!cfg->compile_aot) {
6515 * emit_get_rgctx_method () calls mono_class_vtable () so check
6516 * for type load errors before.
6518 mono_class_setup_vtable (cmethod->klass);
6519 CHECK_TYPELOAD (cmethod->klass);
6522 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6524 /* !marshalbyref is needed to properly handle generic methods + remoting */
6525 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6526 MONO_METHOD_IS_FINAL (cmethod)) &&
6527 !cmethod->klass->marshalbyref) {
6534 if (pass_imt_from_rgctx) {
6535 g_assert (!pass_vtable);
6538 imt_arg = emit_get_rgctx_method (cfg, context_used,
6539 cmethod, MONO_RGCTX_INFO_METHOD);
6543 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6545 /* Calling virtual generic methods */
6546 if (cmethod && virtual &&
6547 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6548 !(MONO_METHOD_IS_FINAL (cmethod) &&
6549 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6550 mono_method_signature (cmethod)->generic_param_count) {
6551 MonoInst *this_temp, *this_arg_temp, *store;
6552 MonoInst *iargs [4];
6554 g_assert (mono_method_signature (cmethod)->is_inflated);
6556 /* Prevent inlining of methods that contain indirect calls */
6559 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6560 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6561 g_assert (!imt_arg);
6563 g_assert (cmethod->is_inflated);
6564 imt_arg = emit_get_rgctx_method (cfg, context_used,
6565 cmethod, MONO_RGCTX_INFO_METHOD);
6566 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6570 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6571 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6572 MONO_ADD_INS (bblock, store);
6574 /* FIXME: This should be a managed pointer */
6575 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6577 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6578 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6579 cmethod, MONO_RGCTX_INFO_METHOD);
6580 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6581 addr = mono_emit_jit_icall (cfg,
6582 mono_helper_compile_generic_method, iargs);
6584 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6586 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6589 if (!MONO_TYPE_IS_VOID (fsig->ret))
6590 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6592 CHECK_CFG_EXCEPTION;
6599 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6600 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6602 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6606 /* FIXME: runtime generic context pointer for jumps? */
6607 /* FIXME: handle this for generic sharing eventually */
6608 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6611 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6614 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6615 /* Handle tail calls similarly to calls */
6616 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6618 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6619 call->tail_call = TRUE;
6620 call->method = cmethod;
6621 call->signature = mono_method_signature (cmethod);
6624 * We implement tail calls by storing the actual arguments into the
6625 * argument variables, then emitting a CEE_JMP.
6627 for (i = 0; i < n; ++i) {
6628 /* Prevent argument from being register allocated */
6629 arg_array [i]->flags |= MONO_INST_VOLATILE;
6630 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6634 ins = (MonoInst*)call;
6635 ins->inst_p0 = cmethod;
6636 ins->inst_p1 = arg_array [0];
6637 MONO_ADD_INS (bblock, ins);
6638 link_bblock (cfg, bblock, end_bblock);
6639 start_new_bblock = 1;
6641 CHECK_CFG_EXCEPTION;
6643 /* skip CEE_RET as well */
6649 /* Conversion to a JIT intrinsic */
6650 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6652 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6653 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6658 CHECK_CFG_EXCEPTION;
6666 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6667 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6668 mono_method_check_inlining (cfg, cmethod) &&
6669 !g_list_find (dont_inline, cmethod)) {
6671 gboolean allways = FALSE;
6673 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6674 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6675 /* Prevent inlining of methods that call wrappers */
6677 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6681 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6683 cfg->real_offset += 5;
6686 if (!MONO_TYPE_IS_VOID (fsig->ret))
6687 /* *sp is already set by inline_method */
6690 inline_costs += costs;
6696 inline_costs += 10 * num_calls++;
6698 /* Tail recursion elimination */
6699 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6700 gboolean has_vtargs = FALSE;
6703 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6706 /* keep it simple */
6707 for (i = fsig->param_count - 1; i >= 0; i--) {
6708 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6713 for (i = 0; i < n; ++i)
6714 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6715 MONO_INST_NEW (cfg, ins, OP_BR);
6716 MONO_ADD_INS (bblock, ins);
6717 tblock = start_bblock->out_bb [0];
6718 link_bblock (cfg, bblock, tblock);
6719 ins->inst_target_bb = tblock;
6720 start_new_bblock = 1;
6722 /* skip the CEE_RET, too */
6723 if (ip_in_bb (cfg, bblock, ip + 5))
6733 /* Generic sharing */
6734 /* FIXME: only do this for generic methods if
6735 they are not shared! */
6736 if (context_used && !imt_arg && !array_rank &&
6737 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6738 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6739 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6740 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6743 g_assert (cfg->generic_sharing_context && cmethod);
6747 * We are compiling a call to a
6748 * generic method from shared code,
6749 * which means that we have to look up
6750 * the method in the rgctx and do an
6753 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6756 /* Indirect calls */
6758 g_assert (!imt_arg);
6760 if (*ip == CEE_CALL)
6761 g_assert (context_used);
6762 else if (*ip == CEE_CALLI)
6763 g_assert (!vtable_arg);
6765 /* FIXME: what the hell is this??? */
6766 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6767 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6769 /* Prevent inlining of methods with indirect calls */
6774 int rgctx_reg = mono_alloc_preg (cfg);
6776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6777 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6778 call = (MonoCallInst*)ins;
6779 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6781 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6783 * Instead of emitting an indirect call, emit a direct call
6784 * with the contents of the aotconst as the patch info.
6786 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6788 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6789 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6792 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6795 if (!MONO_TYPE_IS_VOID (fsig->ret))
6796 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6798 CHECK_CFG_EXCEPTION;
6809 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6810 if (sp [fsig->param_count]->type == STACK_OBJ) {
6811 MonoInst *iargs [2];
6814 iargs [1] = sp [fsig->param_count];
6816 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6819 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6820 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6821 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6822 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6824 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6827 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6828 if (!cmethod->klass->element_class->valuetype && !readonly)
6829 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6830 CHECK_TYPELOAD (cmethod->klass);
6833 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6836 g_assert_not_reached ();
6839 CHECK_CFG_EXCEPTION;
6846 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6848 if (!MONO_TYPE_IS_VOID (fsig->ret))
6849 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6851 CHECK_CFG_EXCEPTION;
6861 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6863 } else if (imt_arg) {
6864 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6866 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6869 if (!MONO_TYPE_IS_VOID (fsig->ret))
6870 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6872 CHECK_CFG_EXCEPTION;
6879 if (cfg->method != method) {
6880 /* return from inlined method */
6882 * If in_count == 0, that means the ret is unreachable due to
6883 * being preceeded by a throw. In that case, inline_method () will
6884 * handle setting the return value
6885 * (test case: test_0_inline_throw ()).
6887 if (return_var && cfg->cbb->in_count) {
6891 //g_assert (returnvar != -1);
6892 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6893 cfg->ret_var_set = TRUE;
6897 MonoType *ret_type = mono_method_signature (method)->ret;
6901 * Place a seq point here too even through the IL stack is not
6902 * empty, so a step over on
6905 * will work correctly.
6907 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6908 MONO_ADD_INS (cfg->cbb, ins);
6911 g_assert (!return_var);
6914 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6917 if (!cfg->vret_addr) {
6920 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6922 EMIT_NEW_RETLOADA (cfg, ret_addr);
6924 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6925 ins->klass = mono_class_from_mono_type (ret_type);
6928 #ifdef MONO_ARCH_SOFT_FLOAT
6929 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6930 MonoInst *iargs [1];
6934 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6935 mono_arch_emit_setret (cfg, method, conv);
6937 mono_arch_emit_setret (cfg, method, *sp);
6940 mono_arch_emit_setret (cfg, method, *sp);
6945 if (sp != stack_start)
6947 MONO_INST_NEW (cfg, ins, OP_BR);
6949 ins->inst_target_bb = end_bblock;
6950 MONO_ADD_INS (bblock, ins);
6951 link_bblock (cfg, bblock, end_bblock);
6952 start_new_bblock = 1;
6956 MONO_INST_NEW (cfg, ins, OP_BR);
6958 target = ip + 1 + (signed char)(*ip);
6960 GET_BBLOCK (cfg, tblock, target);
6961 link_bblock (cfg, bblock, tblock);
6962 ins->inst_target_bb = tblock;
6963 if (sp != stack_start) {
6964 handle_stack_args (cfg, stack_start, sp - stack_start);
6966 CHECK_UNVERIFIABLE (cfg);
6968 MONO_ADD_INS (bblock, ins);
6969 start_new_bblock = 1;
6970 inline_costs += BRANCH_COST;
6984 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6986 target = ip + 1 + *(signed char*)ip;
6992 inline_costs += BRANCH_COST;
6996 MONO_INST_NEW (cfg, ins, OP_BR);
6999 target = ip + 4 + (gint32)read32(ip);
7001 GET_BBLOCK (cfg, tblock, target);
7002 link_bblock (cfg, bblock, tblock);
7003 ins->inst_target_bb = tblock;
7004 if (sp != stack_start) {
7005 handle_stack_args (cfg, stack_start, sp - stack_start);
7007 CHECK_UNVERIFIABLE (cfg);
7010 MONO_ADD_INS (bblock, ins);
7012 start_new_bblock = 1;
7013 inline_costs += BRANCH_COST;
7020 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7021 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7022 guint32 opsize = is_short ? 1 : 4;
7024 CHECK_OPSIZE (opsize);
7026 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7029 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7034 GET_BBLOCK (cfg, tblock, target);
7035 link_bblock (cfg, bblock, tblock);
7036 GET_BBLOCK (cfg, tblock, ip);
7037 link_bblock (cfg, bblock, tblock);
7039 if (sp != stack_start) {
7040 handle_stack_args (cfg, stack_start, sp - stack_start);
7041 CHECK_UNVERIFIABLE (cfg);
7044 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7045 cmp->sreg1 = sp [0]->dreg;
7046 type_from_op (cmp, sp [0], NULL);
7049 #if SIZEOF_REGISTER == 4
7050 if (cmp->opcode == OP_LCOMPARE_IMM) {
7051 /* Convert it to OP_LCOMPARE */
7052 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7053 ins->type = STACK_I8;
7054 ins->dreg = alloc_dreg (cfg, STACK_I8);
7056 MONO_ADD_INS (bblock, ins);
7057 cmp->opcode = OP_LCOMPARE;
7058 cmp->sreg2 = ins->dreg;
7061 MONO_ADD_INS (bblock, cmp);
7063 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7064 type_from_op (ins, sp [0], NULL);
7065 MONO_ADD_INS (bblock, ins);
7066 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7067 GET_BBLOCK (cfg, tblock, target);
7068 ins->inst_true_bb = tblock;
7069 GET_BBLOCK (cfg, tblock, ip);
7070 ins->inst_false_bb = tblock;
7071 start_new_bblock = 2;
7074 inline_costs += BRANCH_COST;
7089 MONO_INST_NEW (cfg, ins, *ip);
7091 target = ip + 4 + (gint32)read32(ip);
7097 inline_costs += BRANCH_COST;
7101 MonoBasicBlock **targets;
7102 MonoBasicBlock *default_bblock;
7103 MonoJumpInfoBBTable *table;
7104 int offset_reg = alloc_preg (cfg);
7105 int target_reg = alloc_preg (cfg);
7106 int table_reg = alloc_preg (cfg);
7107 int sum_reg = alloc_preg (cfg);
7108 gboolean use_op_switch;
7112 n = read32 (ip + 1);
7115 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7119 CHECK_OPSIZE (n * sizeof (guint32));
7120 target = ip + n * sizeof (guint32);
7122 GET_BBLOCK (cfg, default_bblock, target);
7123 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7125 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7126 for (i = 0; i < n; ++i) {
7127 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7128 targets [i] = tblock;
7129 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7133 if (sp != stack_start) {
7135 * Link the current bb with the targets as well, so handle_stack_args
7136 * will set their in_stack correctly.
7138 link_bblock (cfg, bblock, default_bblock);
7139 for (i = 0; i < n; ++i)
7140 link_bblock (cfg, bblock, targets [i]);
7142 handle_stack_args (cfg, stack_start, sp - stack_start);
7144 CHECK_UNVERIFIABLE (cfg);
7147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7151 for (i = 0; i < n; ++i)
7152 link_bblock (cfg, bblock, targets [i]);
7154 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7155 table->table = targets;
7156 table->table_size = n;
7158 use_op_switch = FALSE;
7160 /* ARM implements SWITCH statements differently */
7161 /* FIXME: Make it use the generic implementation */
7162 if (!cfg->compile_aot)
7163 use_op_switch = TRUE;
7166 if (COMPILE_LLVM (cfg))
7167 use_op_switch = TRUE;
7169 cfg->cbb->has_jump_table = 1;
7171 if (use_op_switch) {
7172 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7173 ins->sreg1 = src1->dreg;
7174 ins->inst_p0 = table;
7175 ins->inst_many_bb = targets;
7176 ins->klass = GUINT_TO_POINTER (n);
7177 MONO_ADD_INS (cfg->cbb, ins);
7179 if (sizeof (gpointer) == 8)
7180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7182 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7184 #if SIZEOF_REGISTER == 8
7185 /* The upper word might not be zero, and we add it to a 64 bit address later */
7186 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7189 if (cfg->compile_aot) {
7190 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7192 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7193 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7194 ins->inst_p0 = table;
7195 ins->dreg = table_reg;
7196 MONO_ADD_INS (cfg->cbb, ins);
7199 /* FIXME: Use load_memindex */
7200 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7202 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7204 start_new_bblock = 1;
7205 inline_costs += (BRANCH_COST * 2);
7225 dreg = alloc_freg (cfg);
7228 dreg = alloc_lreg (cfg);
7231 dreg = alloc_preg (cfg);
7234 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7235 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7236 ins->flags |= ins_flag;
7238 MONO_ADD_INS (bblock, ins);
7253 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7254 ins->flags |= ins_flag;
7256 MONO_ADD_INS (bblock, ins);
7258 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7259 emit_write_barrier (cfg, sp [0], sp [1], -1);
7268 MONO_INST_NEW (cfg, ins, (*ip));
7270 ins->sreg1 = sp [0]->dreg;
7271 ins->sreg2 = sp [1]->dreg;
7272 type_from_op (ins, sp [0], sp [1]);
7274 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7276 /* Use the immediate opcodes if possible */
7277 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7278 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7279 if (imm_opcode != -1) {
7280 ins->opcode = imm_opcode;
7281 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7284 sp [1]->opcode = OP_NOP;
7288 MONO_ADD_INS ((cfg)->cbb, (ins));
7290 *sp++ = mono_decompose_opcode (cfg, ins);
7307 MONO_INST_NEW (cfg, ins, (*ip));
7309 ins->sreg1 = sp [0]->dreg;
7310 ins->sreg2 = sp [1]->dreg;
7311 type_from_op (ins, sp [0], sp [1]);
7313 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7314 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7316 /* FIXME: Pass opcode to is_inst_imm */
7318 /* Use the immediate opcodes if possible */
7319 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7322 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7323 if (imm_opcode != -1) {
7324 ins->opcode = imm_opcode;
7325 if (sp [1]->opcode == OP_I8CONST) {
7326 #if SIZEOF_REGISTER == 8
7327 ins->inst_imm = sp [1]->inst_l;
7329 ins->inst_ls_word = sp [1]->inst_ls_word;
7330 ins->inst_ms_word = sp [1]->inst_ms_word;
7334 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7337 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7338 if (sp [1]->next == NULL)
7339 sp [1]->opcode = OP_NOP;
7342 MONO_ADD_INS ((cfg)->cbb, (ins));
7344 *sp++ = mono_decompose_opcode (cfg, ins);
7357 case CEE_CONV_OVF_I8:
7358 case CEE_CONV_OVF_U8:
7362 /* Special case this earlier so we have long constants in the IR */
7363 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7364 int data = sp [-1]->inst_c0;
7365 sp [-1]->opcode = OP_I8CONST;
7366 sp [-1]->type = STACK_I8;
7367 #if SIZEOF_REGISTER == 8
7368 if ((*ip) == CEE_CONV_U8)
7369 sp [-1]->inst_c0 = (guint32)data;
7371 sp [-1]->inst_c0 = data;
7373 sp [-1]->inst_ls_word = data;
7374 if ((*ip) == CEE_CONV_U8)
7375 sp [-1]->inst_ms_word = 0;
7377 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7379 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7386 case CEE_CONV_OVF_I4:
7387 case CEE_CONV_OVF_I1:
7388 case CEE_CONV_OVF_I2:
7389 case CEE_CONV_OVF_I:
7390 case CEE_CONV_OVF_U:
7393 if (sp [-1]->type == STACK_R8) {
7394 ADD_UNOP (CEE_CONV_OVF_I8);
7401 case CEE_CONV_OVF_U1:
7402 case CEE_CONV_OVF_U2:
7403 case CEE_CONV_OVF_U4:
7406 if (sp [-1]->type == STACK_R8) {
7407 ADD_UNOP (CEE_CONV_OVF_U8);
7414 case CEE_CONV_OVF_I1_UN:
7415 case CEE_CONV_OVF_I2_UN:
7416 case CEE_CONV_OVF_I4_UN:
7417 case CEE_CONV_OVF_I8_UN:
7418 case CEE_CONV_OVF_U1_UN:
7419 case CEE_CONV_OVF_U2_UN:
7420 case CEE_CONV_OVF_U4_UN:
7421 case CEE_CONV_OVF_U8_UN:
7422 case CEE_CONV_OVF_I_UN:
7423 case CEE_CONV_OVF_U_UN:
7430 CHECK_CFG_EXCEPTION;
7434 case CEE_ADD_OVF_UN:
7436 case CEE_MUL_OVF_UN:
7438 case CEE_SUB_OVF_UN:
7446 token = read32 (ip + 1);
7447 klass = mini_get_class (method, token, generic_context);
7448 CHECK_TYPELOAD (klass);
7450 if (generic_class_is_reference_type (cfg, klass)) {
7451 MonoInst *store, *load;
7452 int dreg = alloc_preg (cfg);
7454 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7455 load->flags |= ins_flag;
7456 MONO_ADD_INS (cfg->cbb, load);
7458 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7459 store->flags |= ins_flag;
7460 MONO_ADD_INS (cfg->cbb, store);
7462 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7463 emit_write_barrier (cfg, sp [0], sp [1], -1);
7465 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7477 token = read32 (ip + 1);
7478 klass = mini_get_class (method, token, generic_context);
7479 CHECK_TYPELOAD (klass);
7481 /* Optimize the common ldobj+stloc combination */
7491 loc_index = ip [5] - CEE_STLOC_0;
7498 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7499 CHECK_LOCAL (loc_index);
7501 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7502 ins->dreg = cfg->locals [loc_index]->dreg;
7508 /* Optimize the ldobj+stobj combination */
7509 /* The reference case ends up being a load+store anyway */
7510 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7515 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7522 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7531 CHECK_STACK_OVF (1);
7533 n = read32 (ip + 1);
7535 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7536 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7537 ins->type = STACK_OBJ;
7540 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7541 MonoInst *iargs [1];
7543 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7544 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7546 if (cfg->opt & MONO_OPT_SHARED) {
7547 MonoInst *iargs [3];
7549 if (cfg->compile_aot) {
7550 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7552 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7553 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7554 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7555 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7556 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7558 if (bblock->out_of_line) {
7559 MonoInst *iargs [2];
7561 if (image == mono_defaults.corlib) {
7563 * Avoid relocations in AOT and save some space by using a
7564 * version of helper_ldstr specialized to mscorlib.
7566 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7567 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7569 /* Avoid creating the string object */
7570 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7571 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7572 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7576 if (cfg->compile_aot) {
7577 NEW_LDSTRCONST (cfg, ins, image, n);
7579 MONO_ADD_INS (bblock, ins);
7582 NEW_PCONST (cfg, ins, NULL);
7583 ins->type = STACK_OBJ;
7584 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7586 MONO_ADD_INS (bblock, ins);
7595 MonoInst *iargs [2];
7596 MonoMethodSignature *fsig;
7599 MonoInst *vtable_arg = NULL;
7602 token = read32 (ip + 1);
7603 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7604 if (!cmethod || mono_loader_get_last_error ())
7606 fsig = mono_method_get_signature (cmethod, image, token);
7610 mono_save_token_info (cfg, image, token, cmethod);
7612 if (!mono_class_init (cmethod->klass))
7615 if (cfg->generic_sharing_context)
7616 context_used = mono_method_check_context_used (cmethod);
7618 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7619 if (check_linkdemand (cfg, method, cmethod))
7621 CHECK_CFG_EXCEPTION;
7622 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7623 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7626 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7627 emit_generic_class_init (cfg, cmethod->klass);
7628 CHECK_TYPELOAD (cmethod->klass);
7631 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7632 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7633 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7634 mono_class_vtable (cfg->domain, cmethod->klass);
7635 CHECK_TYPELOAD (cmethod->klass);
7637 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7638 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7641 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7642 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7644 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7646 CHECK_TYPELOAD (cmethod->klass);
7647 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7652 n = fsig->param_count;
7656 * Generate smaller code for the common newobj <exception> instruction in
7657 * argument checking code.
7659 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7660 is_exception_class (cmethod->klass) && n <= 2 &&
7661 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7662 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7663 MonoInst *iargs [3];
7665 g_assert (!vtable_arg);
7669 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7672 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7676 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7681 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7684 g_assert_not_reached ();
7692 /* move the args to allow room for 'this' in the first position */
7698 /* check_call_signature () requires sp[0] to be set */
7699 this_ins.type = STACK_OBJ;
7701 if (check_call_signature (cfg, fsig, sp))
7706 if (mini_class_is_system_array (cmethod->klass)) {
7707 g_assert (!vtable_arg);
7709 *sp = emit_get_rgctx_method (cfg, context_used,
7710 cmethod, MONO_RGCTX_INFO_METHOD);
7712 /* Avoid varargs in the common case */
7713 if (fsig->param_count == 1)
7714 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7715 else if (fsig->param_count == 2)
7716 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7717 else if (fsig->param_count == 3)
7718 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7720 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7721 } else if (cmethod->string_ctor) {
7722 g_assert (!context_used);
7723 g_assert (!vtable_arg);
7724 /* we simply pass a null pointer */
7725 EMIT_NEW_PCONST (cfg, *sp, NULL);
7726 /* now call the string ctor */
7727 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7729 MonoInst* callvirt_this_arg = NULL;
7731 if (cmethod->klass->valuetype) {
7732 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7733 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7734 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7739 * The code generated by mini_emit_virtual_call () expects
7740 * iargs [0] to be a boxed instance, but luckily the vcall
7741 * will be transformed into a normal call there.
7743 } else if (context_used) {
7744 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7747 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7749 CHECK_TYPELOAD (cmethod->klass);
7752 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7753 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7754 * As a workaround, we call class cctors before allocating objects.
7756 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7757 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7758 if (cfg->verbose_level > 2)
7759 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7760 class_inits = g_slist_prepend (class_inits, vtable);
7763 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7766 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7769 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7771 /* Now call the actual ctor */
7772 /* Avoid virtual calls to ctors if possible */
7773 if (cmethod->klass->marshalbyref)
7774 callvirt_this_arg = sp [0];
7777 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7778 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7779 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7784 CHECK_CFG_EXCEPTION;
7789 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7790 mono_method_check_inlining (cfg, cmethod) &&
7791 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7792 !g_list_find (dont_inline, cmethod)) {
7795 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7796 cfg->real_offset += 5;
7799 inline_costs += costs - 5;
7802 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7804 } else if (context_used &&
7805 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7806 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7807 MonoInst *cmethod_addr;
7809 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7810 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7812 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7815 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7816 callvirt_this_arg, NULL, vtable_arg);
7820 if (alloc == NULL) {
7822 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7823 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7837 token = read32 (ip + 1);
7838 klass = mini_get_class (method, token, generic_context);
7839 CHECK_TYPELOAD (klass);
7840 if (sp [0]->type != STACK_OBJ)
7843 if (cfg->generic_sharing_context)
7844 context_used = mono_class_check_context_used (klass);
7846 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7853 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7855 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7859 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7860 MonoMethod *mono_castclass;
7861 MonoInst *iargs [1];
7864 mono_castclass = mono_marshal_get_castclass (klass);
7867 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7868 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7869 g_assert (costs > 0);
7872 cfg->real_offset += 5;
7877 inline_costs += costs;
7880 ins = handle_castclass (cfg, klass, *sp, context_used);
7881 CHECK_CFG_EXCEPTION;
7891 token = read32 (ip + 1);
7892 klass = mini_get_class (method, token, generic_context);
7893 CHECK_TYPELOAD (klass);
7894 if (sp [0]->type != STACK_OBJ)
7897 if (cfg->generic_sharing_context)
7898 context_used = mono_class_check_context_used (klass);
7900 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7907 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7909 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7913 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7914 MonoMethod *mono_isinst;
7915 MonoInst *iargs [1];
7918 mono_isinst = mono_marshal_get_isinst (klass);
7921 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7922 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7923 g_assert (costs > 0);
7926 cfg->real_offset += 5;
7931 inline_costs += costs;
7934 ins = handle_isinst (cfg, klass, *sp, context_used);
7935 CHECK_CFG_EXCEPTION;
7942 case CEE_UNBOX_ANY: {
7946 token = read32 (ip + 1);
7947 klass = mini_get_class (method, token, generic_context);
7948 CHECK_TYPELOAD (klass);
7950 mono_save_token_info (cfg, image, token, klass);
7952 if (cfg->generic_sharing_context)
7953 context_used = mono_class_check_context_used (klass);
7955 if (generic_class_is_reference_type (cfg, klass)) {
7956 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7957 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7958 MonoMethod *mono_castclass;
7959 MonoInst *iargs [1];
7962 mono_castclass = mono_marshal_get_castclass (klass);
7965 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7966 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7968 g_assert (costs > 0);
7971 cfg->real_offset += 5;
7975 inline_costs += costs;
7977 ins = handle_castclass (cfg, klass, *sp, context_used);
7978 CHECK_CFG_EXCEPTION;
7986 if (mono_class_is_nullable (klass)) {
7987 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7994 ins = handle_unbox (cfg, klass, sp, context_used);
8000 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8013 token = read32 (ip + 1);
8014 klass = mini_get_class (method, token, generic_context);
8015 CHECK_TYPELOAD (klass);
8017 mono_save_token_info (cfg, image, token, klass);
8019 if (cfg->generic_sharing_context)
8020 context_used = mono_class_check_context_used (klass);
8022 if (generic_class_is_reference_type (cfg, klass)) {
8028 if (klass == mono_defaults.void_class)
8030 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8032 /* frequent check in generic code: box (struct), brtrue */
8034 // FIXME: LLVM can't handle the inconsistent bb linking
8035 if (!mono_class_is_nullable (klass) &&
8036 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8037 (ip [5] == CEE_BRTRUE ||
8038 ip [5] == CEE_BRTRUE_S ||
8039 ip [5] == CEE_BRFALSE ||
8040 ip [5] == CEE_BRFALSE_S)) {
8041 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8043 MonoBasicBlock *true_bb, *false_bb;
8047 if (cfg->verbose_level > 3) {
8048 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8049 printf ("<box+brtrue opt>\n");
8057 target = ip + 1 + (signed char)(*ip);
8064 target = ip + 4 + (gint)(read32 (ip));
8068 g_assert_not_reached ();
8072 * We need to link both bblocks, since it is needed for handling stack
8073 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8074 * Branching to only one of them would lead to inconsistencies, so
8075 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8077 GET_BBLOCK (cfg, true_bb, target);
8078 GET_BBLOCK (cfg, false_bb, ip);
8080 mono_link_bblock (cfg, cfg->cbb, true_bb);
8081 mono_link_bblock (cfg, cfg->cbb, false_bb);
8083 if (sp != stack_start) {
8084 handle_stack_args (cfg, stack_start, sp - stack_start);
8086 CHECK_UNVERIFIABLE (cfg);
8089 if (COMPILE_LLVM (cfg)) {
8090 dreg = alloc_ireg (cfg);
8091 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8092 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8094 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8096 /* The JIT can't eliminate the iconst+compare */
8097 MONO_INST_NEW (cfg, ins, OP_BR);
8098 ins->inst_target_bb = is_true ? true_bb : false_bb;
8099 MONO_ADD_INS (cfg->cbb, ins);
8102 start_new_bblock = 1;
8106 *sp++ = handle_box (cfg, val, klass, context_used);
8108 CHECK_CFG_EXCEPTION;
8117 token = read32 (ip + 1);
8118 klass = mini_get_class (method, token, generic_context);
8119 CHECK_TYPELOAD (klass);
8121 mono_save_token_info (cfg, image, token, klass);
8123 if (cfg->generic_sharing_context)
8124 context_used = mono_class_check_context_used (klass);
8126 if (mono_class_is_nullable (klass)) {
8129 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8130 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8134 ins = handle_unbox (cfg, klass, sp, context_used);
8144 MonoClassField *field;
8148 if (*ip == CEE_STFLD) {
8155 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8157 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8160 token = read32 (ip + 1);
8161 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8162 field = mono_method_get_wrapper_data (method, token);
8163 klass = field->parent;
8166 field = mono_field_from_token (image, token, &klass, generic_context);
8170 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8171 FIELD_ACCESS_FAILURE;
8172 mono_class_init (klass);
8174 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8175 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8176 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8177 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8180 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8181 if (*ip == CEE_STFLD) {
8182 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8184 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8185 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8186 MonoInst *iargs [5];
8189 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8190 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8191 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8195 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8196 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8197 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8198 g_assert (costs > 0);
8200 cfg->real_offset += 5;
8203 inline_costs += costs;
8205 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8210 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8212 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8213 if (sp [0]->opcode != OP_LDADDR)
8214 store->flags |= MONO_INST_FAULT;
8216 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8217 /* insert call to write barrier */
8221 dreg = alloc_preg (cfg);
8222 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8223 emit_write_barrier (cfg, ptr, sp [1], -1);
8226 store->flags |= ins_flag;
8233 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8234 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8235 MonoInst *iargs [4];
8238 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8239 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8240 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8241 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8242 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8243 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8245 g_assert (costs > 0);
8247 cfg->real_offset += 5;
8251 inline_costs += costs;
8253 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8257 if (sp [0]->type == STACK_VTYPE) {
8260 /* Have to compute the address of the variable */
8262 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8264 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8266 g_assert (var->klass == klass);
8268 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8272 if (*ip == CEE_LDFLDA) {
8273 if (sp [0]->type == STACK_OBJ) {
8274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8275 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8278 dreg = alloc_preg (cfg);
8280 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8281 ins->klass = mono_class_from_mono_type (field->type);
8282 ins->type = STACK_MP;
8287 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8289 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8290 load->flags |= ins_flag;
8291 if (sp [0]->opcode != OP_LDADDR)
8292 load->flags |= MONO_INST_FAULT;
8303 MonoClassField *field;
8304 gpointer addr = NULL;
8305 gboolean is_special_static;
8308 token = read32 (ip + 1);
8310 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8311 field = mono_method_get_wrapper_data (method, token);
8312 klass = field->parent;
8315 field = mono_field_from_token (image, token, &klass, generic_context);
8318 mono_class_init (klass);
8319 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8320 FIELD_ACCESS_FAILURE;
8322 /* if the class is Critical then transparent code cannot access its fields */
8323 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8324 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8327 * We can only support shared generic static
8328 * field access on architectures where the
8329 * trampoline code has been extended to handle
8330 * the generic class init.
8332 #ifndef MONO_ARCH_VTABLE_REG
8333 GENERIC_SHARING_FAILURE (*ip);
8336 if (cfg->generic_sharing_context)
8337 context_used = mono_class_check_context_used (klass);
8339 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8341 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8342 * to be called here.
8344 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8345 mono_class_vtable (cfg->domain, klass);
8346 CHECK_TYPELOAD (klass);
8348 mono_domain_lock (cfg->domain);
8349 if (cfg->domain->special_static_fields)
8350 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8351 mono_domain_unlock (cfg->domain);
8353 is_special_static = mono_class_field_is_special_static (field);
8355 /* Generate IR to compute the field address */
8356 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8358 * Fast access to TLS data
8359 * Inline version of get_thread_static_data () in
8363 int idx, static_data_reg, array_reg, dreg;
8364 MonoInst *thread_ins;
8366 // offset &= 0x7fffffff;
8367 // idx = (offset >> 24) - 1;
8368 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8370 thread_ins = mono_get_thread_intrinsic (cfg);
8371 MONO_ADD_INS (cfg->cbb, thread_ins);
8372 static_data_reg = alloc_ireg (cfg);
8373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8375 if (cfg->compile_aot) {
8376 int offset_reg, offset2_reg, idx_reg;
8378 /* For TLS variables, this will return the TLS offset */
8379 EMIT_NEW_SFLDACONST (cfg, ins, field);
8380 offset_reg = ins->dreg;
8381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8382 idx_reg = alloc_ireg (cfg);
8383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8386 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8387 array_reg = alloc_ireg (cfg);
8388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8389 offset2_reg = alloc_ireg (cfg);
8390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8391 dreg = alloc_ireg (cfg);
8392 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8394 offset = (gsize)addr & 0x7fffffff;
8395 idx = (offset >> 24) - 1;
8397 array_reg = alloc_ireg (cfg);
8398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8399 dreg = alloc_ireg (cfg);
8400 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8402 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8403 (cfg->compile_aot && is_special_static) ||
8404 (context_used && is_special_static)) {
8405 MonoInst *iargs [2];
8407 g_assert (field->parent);
8408 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8410 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8411 field, MONO_RGCTX_INFO_CLASS_FIELD);
8413 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8415 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8416 } else if (context_used) {
8417 MonoInst *static_data;
8420 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8421 method->klass->name_space, method->klass->name, method->name,
8422 depth, field->offset);
8425 if (mono_class_needs_cctor_run (klass, method))
8426 emit_generic_class_init (cfg, klass);
8429 * The pointer we're computing here is
8431 * super_info.static_data + field->offset
8433 static_data = emit_get_rgctx_klass (cfg, context_used,
8434 klass, MONO_RGCTX_INFO_STATIC_DATA);
8436 if (field->offset == 0) {
8439 int addr_reg = mono_alloc_preg (cfg);
8440 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8442 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8443 MonoInst *iargs [2];
8445 g_assert (field->parent);
8446 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8447 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8448 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8450 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8452 CHECK_TYPELOAD (klass);
8454 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8455 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8456 if (cfg->verbose_level > 2)
8457 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8458 class_inits = g_slist_prepend (class_inits, vtable);
8460 if (cfg->run_cctors) {
8462 /* This makes it so that inlining cannot trigger */
8463 /* .cctors: too many apps depend on them */
8464 /* running with a specific order... */
8465 if (! vtable->initialized)
8467 ex = mono_runtime_class_init_full (vtable, FALSE);
8469 set_exception_object (cfg, ex);
8470 goto exception_exit;
8474 addr = (char*)vtable->data + field->offset;
8476 if (cfg->compile_aot)
8477 EMIT_NEW_SFLDACONST (cfg, ins, field);
8479 EMIT_NEW_PCONST (cfg, ins, addr);
8481 MonoInst *iargs [1];
8482 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8483 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8487 /* Generate IR to do the actual load/store operation */
8489 if (*ip == CEE_LDSFLDA) {
8490 ins->klass = mono_class_from_mono_type (field->type);
8491 ins->type = STACK_PTR;
8493 } else if (*ip == CEE_STSFLD) {
8498 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8499 store->flags |= ins_flag;
8501 gboolean is_const = FALSE;
8502 MonoVTable *vtable = NULL;
8504 if (!context_used) {
8505 vtable = mono_class_vtable (cfg->domain, klass);
8506 CHECK_TYPELOAD (klass);
8508 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8509 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8510 gpointer addr = (char*)vtable->data + field->offset;
8511 int ro_type = field->type->type;
8512 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8513 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8515 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8518 case MONO_TYPE_BOOLEAN:
8520 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8524 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8527 case MONO_TYPE_CHAR:
8529 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8533 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8538 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8542 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8548 case MONO_TYPE_FNPTR:
8549 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8550 type_to_eval_stack_type ((cfg), field->type, *sp);
8553 case MONO_TYPE_STRING:
8554 case MONO_TYPE_OBJECT:
8555 case MONO_TYPE_CLASS:
8556 case MONO_TYPE_SZARRAY:
8557 case MONO_TYPE_ARRAY:
8558 if (!mono_gc_is_moving ()) {
8559 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8560 type_to_eval_stack_type ((cfg), field->type, *sp);
8568 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8573 case MONO_TYPE_VALUETYPE:
8583 CHECK_STACK_OVF (1);
8585 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8586 load->flags |= ins_flag;
8599 token = read32 (ip + 1);
8600 klass = mini_get_class (method, token, generic_context);
8601 CHECK_TYPELOAD (klass);
8602 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8603 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8604 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8605 generic_class_is_reference_type (cfg, klass)) {
8606 /* insert call to write barrier */
8607 emit_write_barrier (cfg, sp [0], sp [1], -1);
8619 const char *data_ptr;
8621 guint32 field_token;
8627 token = read32 (ip + 1);
8629 klass = mini_get_class (method, token, generic_context);
8630 CHECK_TYPELOAD (klass);
8632 if (cfg->generic_sharing_context)
8633 context_used = mono_class_check_context_used (klass);
8635 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8636 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8637 ins->sreg1 = sp [0]->dreg;
8638 ins->type = STACK_I4;
8639 ins->dreg = alloc_ireg (cfg);
8640 MONO_ADD_INS (cfg->cbb, ins);
8641 *sp = mono_decompose_opcode (cfg, ins);
8646 MonoClass *array_class = mono_array_class_get (klass, 1);
8647 /* FIXME: we cannot get a managed
8648 allocator because we can't get the
8649 open generic class's vtable. We
8650 have the same problem in
8651 handle_alloc(). This
8652 needs to be solved so that we can
8653 have managed allocs of shared
8656 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8657 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8659 MonoMethod *managed_alloc = NULL;
8661 /* FIXME: Decompose later to help abcrem */
8664 args [0] = emit_get_rgctx_klass (cfg, context_used,
8665 array_class, MONO_RGCTX_INFO_VTABLE);
8670 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8672 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8674 if (cfg->opt & MONO_OPT_SHARED) {
8675 /* Decompose now to avoid problems with references to the domainvar */
8676 MonoInst *iargs [3];
8678 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8679 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8682 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8684 /* Decompose later since it is needed by abcrem */
8685 MonoClass *array_type = mono_array_class_get (klass, 1);
8686 mono_class_vtable (cfg->domain, array_type);
8687 CHECK_TYPELOAD (array_type);
8689 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8690 ins->dreg = alloc_preg (cfg);
8691 ins->sreg1 = sp [0]->dreg;
8692 ins->inst_newa_class = klass;
8693 ins->type = STACK_OBJ;
8695 MONO_ADD_INS (cfg->cbb, ins);
8696 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8697 cfg->cbb->has_array_access = TRUE;
8699 /* Needed so mono_emit_load_get_addr () gets called */
8700 mono_get_got_var (cfg);
8710 * we inline/optimize the initialization sequence if possible.
8711 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8712 * for small sizes open code the memcpy
8713 * ensure the rva field is big enough
8715 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8716 MonoMethod *memcpy_method = get_memcpy_method ();
8717 MonoInst *iargs [3];
8718 int add_reg = alloc_preg (cfg);
8720 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8721 if (cfg->compile_aot) {
8722 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8724 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8726 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8727 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8736 if (sp [0]->type != STACK_OBJ)
8739 dreg = alloc_preg (cfg);
8740 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8741 ins->dreg = alloc_preg (cfg);
8742 ins->sreg1 = sp [0]->dreg;
8743 ins->type = STACK_I4;
8744 /* This flag will be inherited by the decomposition */
8745 ins->flags |= MONO_INST_FAULT;
8746 MONO_ADD_INS (cfg->cbb, ins);
8747 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8748 cfg->cbb->has_array_access = TRUE;
8756 if (sp [0]->type != STACK_OBJ)
8759 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8761 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8762 CHECK_TYPELOAD (klass);
8763 /* we need to make sure that this array is exactly the type it needs
8764 * to be for correctness. the wrappers are lax with their usage
8765 * so we need to ignore them here
8767 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8768 MonoClass *array_class = mono_array_class_get (klass, 1);
8769 mini_emit_check_array_type (cfg, sp [0], array_class);
8770 CHECK_TYPELOAD (array_class);
8774 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8789 case CEE_LDELEM_REF: {
8795 if (*ip == CEE_LDELEM) {
8797 token = read32 (ip + 1);
8798 klass = mini_get_class (method, token, generic_context);
8799 CHECK_TYPELOAD (klass);
8800 mono_class_init (klass);
8803 klass = array_access_to_klass (*ip);
8805 if (sp [0]->type != STACK_OBJ)
8808 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8810 if (sp [1]->opcode == OP_ICONST) {
8811 int array_reg = sp [0]->dreg;
8812 int index_reg = sp [1]->dreg;
8813 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8815 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8816 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8818 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8819 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8822 if (*ip == CEE_LDELEM)
8835 case CEE_STELEM_REF:
8842 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8844 if (*ip == CEE_STELEM) {
8846 token = read32 (ip + 1);
8847 klass = mini_get_class (method, token, generic_context);
8848 CHECK_TYPELOAD (klass);
8849 mono_class_init (klass);
8852 klass = array_access_to_klass (*ip);
8854 if (sp [0]->type != STACK_OBJ)
8857 /* storing a NULL doesn't need any of the complex checks in stelemref */
8858 if (generic_class_is_reference_type (cfg, klass) &&
8859 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8860 MonoMethod* helper = mono_marshal_get_stelemref ();
8861 MonoInst *iargs [3];
8863 if (sp [0]->type != STACK_OBJ)
8865 if (sp [2]->type != STACK_OBJ)
8872 mono_emit_method_call (cfg, helper, iargs, NULL);
8874 if (sp [1]->opcode == OP_ICONST) {
8875 int array_reg = sp [0]->dreg;
8876 int index_reg = sp [1]->dreg;
8877 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8879 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8880 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8882 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8883 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8887 if (*ip == CEE_STELEM)
8894 case CEE_CKFINITE: {
8898 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8899 ins->sreg1 = sp [0]->dreg;
8900 ins->dreg = alloc_freg (cfg);
8901 ins->type = STACK_R8;
8902 MONO_ADD_INS (bblock, ins);
8904 *sp++ = mono_decompose_opcode (cfg, ins);
8909 case CEE_REFANYVAL: {
8910 MonoInst *src_var, *src;
8912 int klass_reg = alloc_preg (cfg);
8913 int dreg = alloc_preg (cfg);
8916 MONO_INST_NEW (cfg, ins, *ip);
8919 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8920 CHECK_TYPELOAD (klass);
8921 mono_class_init (klass);
8923 if (cfg->generic_sharing_context)
8924 context_used = mono_class_check_context_used (klass);
8927 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8929 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8930 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8934 MonoInst *klass_ins;
8936 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8937 klass, MONO_RGCTX_INFO_KLASS);
8940 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8941 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8943 mini_emit_class_check (cfg, klass_reg, klass);
8945 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8946 ins->type = STACK_MP;
8951 case CEE_MKREFANY: {
8952 MonoInst *loc, *addr;
8955 MONO_INST_NEW (cfg, ins, *ip);
8958 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8959 CHECK_TYPELOAD (klass);
8960 mono_class_init (klass);
8962 if (cfg->generic_sharing_context)
8963 context_used = mono_class_check_context_used (klass);
8965 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8966 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8969 MonoInst *const_ins;
8970 int type_reg = alloc_preg (cfg);
8972 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8973 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8976 } else if (cfg->compile_aot) {
8977 int const_reg = alloc_preg (cfg);
8978 int type_reg = alloc_preg (cfg);
8980 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8981 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8982 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8983 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8985 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8986 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8988 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8990 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8991 ins->type = STACK_VTYPE;
8992 ins->klass = mono_defaults.typed_reference_class;
8999 MonoClass *handle_class;
9001 CHECK_STACK_OVF (1);
9004 n = read32 (ip + 1);
9006 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9007 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9008 handle = mono_method_get_wrapper_data (method, n);
9009 handle_class = mono_method_get_wrapper_data (method, n + 1);
9010 if (handle_class == mono_defaults.typehandle_class)
9011 handle = &((MonoClass*)handle)->byval_arg;
9014 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9018 mono_class_init (handle_class);
9019 if (cfg->generic_sharing_context) {
9020 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9021 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9022 /* This case handles ldtoken
9023 of an open type, like for
9026 } else if (handle_class == mono_defaults.typehandle_class) {
9027 /* If we get a MONO_TYPE_CLASS
9028 then we need to provide the
9030 instantiation of it. */
9031 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9034 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9035 } else if (handle_class == mono_defaults.fieldhandle_class)
9036 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9037 else if (handle_class == mono_defaults.methodhandle_class)
9038 context_used = mono_method_check_context_used (handle);
9040 g_assert_not_reached ();
9043 if ((cfg->opt & MONO_OPT_SHARED) &&
9044 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9045 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9046 MonoInst *addr, *vtvar, *iargs [3];
9047 int method_context_used;
9049 if (cfg->generic_sharing_context)
9050 method_context_used = mono_method_check_context_used (method);
9052 method_context_used = 0;
9054 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9056 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9057 EMIT_NEW_ICONST (cfg, iargs [1], n);
9058 if (method_context_used) {
9059 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9060 method, MONO_RGCTX_INFO_METHOD);
9061 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9063 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9064 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9066 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9070 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9072 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9073 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9074 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9075 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9076 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9077 MonoClass *tclass = mono_class_from_mono_type (handle);
9079 mono_class_init (tclass);
9081 ins = emit_get_rgctx_klass (cfg, context_used,
9082 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9083 } else if (cfg->compile_aot) {
9084 if (method->wrapper_type) {
9085 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9086 /* Special case for static synchronized wrappers */
9087 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9089 /* FIXME: n is not a normal token */
9090 cfg->disable_aot = TRUE;
9091 EMIT_NEW_PCONST (cfg, ins, NULL);
9094 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9097 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9099 ins->type = STACK_OBJ;
9100 ins->klass = cmethod->klass;
9103 MonoInst *addr, *vtvar;
9105 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9108 if (handle_class == mono_defaults.typehandle_class) {
9109 ins = emit_get_rgctx_klass (cfg, context_used,
9110 mono_class_from_mono_type (handle),
9111 MONO_RGCTX_INFO_TYPE);
9112 } else if (handle_class == mono_defaults.methodhandle_class) {
9113 ins = emit_get_rgctx_method (cfg, context_used,
9114 handle, MONO_RGCTX_INFO_METHOD);
9115 } else if (handle_class == mono_defaults.fieldhandle_class) {
9116 ins = emit_get_rgctx_field (cfg, context_used,
9117 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9119 g_assert_not_reached ();
9121 } else if (cfg->compile_aot) {
9122 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9124 EMIT_NEW_PCONST (cfg, ins, handle);
9126 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9127 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9128 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9138 MONO_INST_NEW (cfg, ins, OP_THROW);
9140 ins->sreg1 = sp [0]->dreg;
9142 bblock->out_of_line = TRUE;
9143 MONO_ADD_INS (bblock, ins);
9144 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9145 MONO_ADD_INS (bblock, ins);
9148 link_bblock (cfg, bblock, end_bblock);
9149 start_new_bblock = 1;
9151 case CEE_ENDFINALLY:
9152 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9153 MONO_ADD_INS (bblock, ins);
9155 start_new_bblock = 1;
9158 * Control will leave the method so empty the stack, otherwise
9159 * the next basic block will start with a nonempty stack.
9161 while (sp != stack_start) {
9169 if (*ip == CEE_LEAVE) {
9171 target = ip + 5 + (gint32)read32(ip + 1);
9174 target = ip + 2 + (signed char)(ip [1]);
9177 /* empty the stack */
9178 while (sp != stack_start) {
9183 * If this leave statement is in a catch block, check for a
9184 * pending exception, and rethrow it if necessary.
9185 * We avoid doing this in runtime invoke wrappers, since those are called
9186 * by native code which expects the wrapper to catch all exceptions.
9188 for (i = 0; i < header->num_clauses; ++i) {
9189 MonoExceptionClause *clause = &header->clauses [i];
9192 * Use <= in the final comparison to handle clauses with multiple
9193 * leave statements, like in bug #78024.
9194 * The ordering of the exception clauses guarantees that we find the
9197 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9199 MonoBasicBlock *dont_throw;
9204 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9207 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9209 NEW_BBLOCK (cfg, dont_throw);
9212 * Currently, we always rethrow the abort exception, despite the
9213 * fact that this is not correct. See thread6.cs for an example.
9214 * But propagating the abort exception is more important than
9215 * getting the semantics right.
9217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9218 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9219 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9221 MONO_START_BB (cfg, dont_throw);
9226 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9228 MonoExceptionClause *clause;
9230 for (tmp = handlers; tmp; tmp = tmp->next) {
9232 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9234 link_bblock (cfg, bblock, tblock);
9235 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9236 ins->inst_target_bb = tblock;
9237 ins->inst_eh_block = clause;
9238 MONO_ADD_INS (bblock, ins);
9239 bblock->has_call_handler = 1;
9240 if (COMPILE_LLVM (cfg)) {
9241 MonoBasicBlock *target_bb;
9244 * Link the finally bblock with the target, since it will
9245 * conceptually branch there.
9246 * FIXME: Have to link the bblock containing the endfinally.
9248 GET_BBLOCK (cfg, target_bb, target);
9249 link_bblock (cfg, tblock, target_bb);
9252 g_list_free (handlers);
9255 MONO_INST_NEW (cfg, ins, OP_BR);
9256 MONO_ADD_INS (bblock, ins);
9257 GET_BBLOCK (cfg, tblock, target);
9258 link_bblock (cfg, bblock, tblock);
9259 ins->inst_target_bb = tblock;
9260 start_new_bblock = 1;
9262 if (*ip == CEE_LEAVE)
9271 * Mono specific opcodes
9273 case MONO_CUSTOM_PREFIX: {
9275 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9279 case CEE_MONO_ICALL: {
9281 MonoJitICallInfo *info;
9283 token = read32 (ip + 2);
9284 func = mono_method_get_wrapper_data (method, token);
9285 info = mono_find_jit_icall_by_addr (func);
9288 CHECK_STACK (info->sig->param_count);
9289 sp -= info->sig->param_count;
9291 ins = mono_emit_jit_icall (cfg, info->func, sp);
9292 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9296 inline_costs += 10 * num_calls++;
9300 case CEE_MONO_LDPTR: {
9303 CHECK_STACK_OVF (1);
9305 token = read32 (ip + 2);
9307 ptr = mono_method_get_wrapper_data (method, token);
9308 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9309 MonoJitICallInfo *callinfo;
9310 const char *icall_name;
9312 icall_name = method->name + strlen ("__icall_wrapper_");
9313 g_assert (icall_name);
9314 callinfo = mono_find_jit_icall_by_name (icall_name);
9315 g_assert (callinfo);
9317 if (ptr == callinfo->func) {
9318 /* Will be transformed into an AOTCONST later */
9319 EMIT_NEW_PCONST (cfg, ins, ptr);
9325 /* FIXME: Generalize this */
9326 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9327 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9332 EMIT_NEW_PCONST (cfg, ins, ptr);
9335 inline_costs += 10 * num_calls++;
9336 /* Can't embed random pointers into AOT code */
9337 cfg->disable_aot = 1;
9340 case CEE_MONO_ICALL_ADDR: {
9341 MonoMethod *cmethod;
9344 CHECK_STACK_OVF (1);
9346 token = read32 (ip + 2);
9348 cmethod = mono_method_get_wrapper_data (method, token);
9350 if (cfg->compile_aot) {
9351 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9353 ptr = mono_lookup_internal_call (cmethod);
9355 EMIT_NEW_PCONST (cfg, ins, ptr);
9361 case CEE_MONO_VTADDR: {
9362 MonoInst *src_var, *src;
9368 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9369 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9374 case CEE_MONO_NEWOBJ: {
9375 MonoInst *iargs [2];
9377 CHECK_STACK_OVF (1);
9379 token = read32 (ip + 2);
9380 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9381 mono_class_init (klass);
9382 NEW_DOMAINCONST (cfg, iargs [0]);
9383 MONO_ADD_INS (cfg->cbb, iargs [0]);
9384 NEW_CLASSCONST (cfg, iargs [1], klass);
9385 MONO_ADD_INS (cfg->cbb, iargs [1]);
9386 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9388 inline_costs += 10 * num_calls++;
9391 case CEE_MONO_OBJADDR:
9394 MONO_INST_NEW (cfg, ins, OP_MOVE);
9395 ins->dreg = alloc_preg (cfg);
9396 ins->sreg1 = sp [0]->dreg;
9397 ins->type = STACK_MP;
9398 MONO_ADD_INS (cfg->cbb, ins);
9402 case CEE_MONO_LDNATIVEOBJ:
9404 * Similar to LDOBJ, but instead load the unmanaged
9405 * representation of the vtype to the stack.
9410 token = read32 (ip + 2);
9411 klass = mono_method_get_wrapper_data (method, token);
9412 g_assert (klass->valuetype);
9413 mono_class_init (klass);
9416 MonoInst *src, *dest, *temp;
9419 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9420 temp->backend.is_pinvoke = 1;
9421 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9422 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9424 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9425 dest->type = STACK_VTYPE;
9426 dest->klass = klass;
9432 case CEE_MONO_RETOBJ: {
9434 * Same as RET, but return the native representation of a vtype
9437 g_assert (cfg->ret);
9438 g_assert (mono_method_signature (method)->pinvoke);
9443 token = read32 (ip + 2);
9444 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9446 if (!cfg->vret_addr) {
9447 g_assert (cfg->ret_var_is_local);
9449 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9451 EMIT_NEW_RETLOADA (cfg, ins);
9453 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9455 if (sp != stack_start)
9458 MONO_INST_NEW (cfg, ins, OP_BR);
9459 ins->inst_target_bb = end_bblock;
9460 MONO_ADD_INS (bblock, ins);
9461 link_bblock (cfg, bblock, end_bblock);
9462 start_new_bblock = 1;
9466 case CEE_MONO_CISINST:
9467 case CEE_MONO_CCASTCLASS: {
9472 token = read32 (ip + 2);
9473 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9474 if (ip [1] == CEE_MONO_CISINST)
9475 ins = handle_cisinst (cfg, klass, sp [0]);
9477 ins = handle_ccastclass (cfg, klass, sp [0]);
9483 case CEE_MONO_SAVE_LMF:
9484 case CEE_MONO_RESTORE_LMF:
9485 #ifdef MONO_ARCH_HAVE_LMF_OPS
9486 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9487 MONO_ADD_INS (bblock, ins);
9488 cfg->need_lmf_area = TRUE;
9492 case CEE_MONO_CLASSCONST:
9493 CHECK_STACK_OVF (1);
9495 token = read32 (ip + 2);
9496 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9499 inline_costs += 10 * num_calls++;
9501 case CEE_MONO_NOT_TAKEN:
9502 bblock->out_of_line = TRUE;
9506 CHECK_STACK_OVF (1);
9508 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9509 ins->dreg = alloc_preg (cfg);
9510 ins->inst_offset = (gint32)read32 (ip + 2);
9511 ins->type = STACK_PTR;
9512 MONO_ADD_INS (bblock, ins);
9516 case CEE_MONO_DYN_CALL: {
9519 /* It would be easier to call a trampoline, but that would put an
9520 * extra frame on the stack, confusing exception handling. So
9521 * implement it inline using an opcode for now.
9524 if (!cfg->dyn_call_var) {
9525 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9526 /* prevent it from being register allocated */
9527 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9530 /* Has to use a call inst since it local regalloc expects it */
9531 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9532 ins = (MonoInst*)call;
9534 ins->sreg1 = sp [0]->dreg;
9535 ins->sreg2 = sp [1]->dreg;
9536 MONO_ADD_INS (bblock, ins);
9538 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9539 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9543 inline_costs += 10 * num_calls++;
9548 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9558 /* somewhat similar to LDTOKEN */
9559 MonoInst *addr, *vtvar;
9560 CHECK_STACK_OVF (1);
9561 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9563 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9564 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9566 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9567 ins->type = STACK_VTYPE;
9568 ins->klass = mono_defaults.argumenthandle_class;
9581 * The following transforms:
9582 * CEE_CEQ into OP_CEQ
9583 * CEE_CGT into OP_CGT
9584 * CEE_CGT_UN into OP_CGT_UN
9585 * CEE_CLT into OP_CLT
9586 * CEE_CLT_UN into OP_CLT_UN
9588 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9590 MONO_INST_NEW (cfg, ins, cmp->opcode);
9592 cmp->sreg1 = sp [0]->dreg;
9593 cmp->sreg2 = sp [1]->dreg;
9594 type_from_op (cmp, sp [0], sp [1]);
9596 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9597 cmp->opcode = OP_LCOMPARE;
9598 else if (sp [0]->type == STACK_R8)
9599 cmp->opcode = OP_FCOMPARE;
9601 cmp->opcode = OP_ICOMPARE;
9602 MONO_ADD_INS (bblock, cmp);
9603 ins->type = STACK_I4;
9604 ins->dreg = alloc_dreg (cfg, ins->type);
9605 type_from_op (ins, sp [0], sp [1]);
9607 if (cmp->opcode == OP_FCOMPARE) {
9609 * The backends expect the fceq opcodes to do the
9612 cmp->opcode = OP_NOP;
9613 ins->sreg1 = cmp->sreg1;
9614 ins->sreg2 = cmp->sreg2;
9616 MONO_ADD_INS (bblock, ins);
9623 MonoMethod *cil_method;
9624 gboolean needs_static_rgctx_invoke;
9626 CHECK_STACK_OVF (1);
9628 n = read32 (ip + 2);
9629 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9630 if (!cmethod || mono_loader_get_last_error ())
9632 mono_class_init (cmethod->klass);
9634 mono_save_token_info (cfg, image, n, cmethod);
9636 if (cfg->generic_sharing_context)
9637 context_used = mono_method_check_context_used (cmethod);
9639 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9641 cil_method = cmethod;
9642 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9643 METHOD_ACCESS_FAILURE;
9645 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9646 if (check_linkdemand (cfg, method, cmethod))
9648 CHECK_CFG_EXCEPTION;
9649 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9650 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9654 * Optimize the common case of ldftn+delegate creation
9656 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9657 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9658 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9659 MonoInst *target_ins;
9661 int invoke_context_used = 0;
9663 invoke = mono_get_delegate_invoke (ctor_method->klass);
9664 if (!invoke || !mono_method_signature (invoke))
9667 if (cfg->generic_sharing_context)
9668 invoke_context_used = mono_method_check_context_used (invoke);
9670 target_ins = sp [-1];
9672 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9673 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9674 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9676 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9680 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9681 /* FIXME: SGEN support */
9682 if (invoke_context_used == 0) {
9684 if (cfg->verbose_level > 3)
9685 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9687 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9688 CHECK_CFG_EXCEPTION;
9697 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9698 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9702 inline_costs += 10 * num_calls++;
9705 case CEE_LDVIRTFTN: {
9710 n = read32 (ip + 2);
9711 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9712 if (!cmethod || mono_loader_get_last_error ())
9714 mono_class_init (cmethod->klass);
9716 if (cfg->generic_sharing_context)
9717 context_used = mono_method_check_context_used (cmethod);
9719 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9720 if (check_linkdemand (cfg, method, cmethod))
9722 CHECK_CFG_EXCEPTION;
9723 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9724 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9730 args [1] = emit_get_rgctx_method (cfg, context_used,
9731 cmethod, MONO_RGCTX_INFO_METHOD);
9734 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9736 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9739 inline_costs += 10 * num_calls++;
9743 CHECK_STACK_OVF (1);
9745 n = read16 (ip + 2);
9747 EMIT_NEW_ARGLOAD (cfg, ins, n);
9752 CHECK_STACK_OVF (1);
9754 n = read16 (ip + 2);
9756 NEW_ARGLOADA (cfg, ins, n);
9757 MONO_ADD_INS (cfg->cbb, ins);
9765 n = read16 (ip + 2);
9767 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9769 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9773 CHECK_STACK_OVF (1);
9775 n = read16 (ip + 2);
9777 EMIT_NEW_LOCLOAD (cfg, ins, n);
9782 unsigned char *tmp_ip;
9783 CHECK_STACK_OVF (1);
9785 n = read16 (ip + 2);
9788 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9794 EMIT_NEW_LOCLOADA (cfg, ins, n);
9803 n = read16 (ip + 2);
9805 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9807 emit_stloc_ir (cfg, sp, header, n);
9814 if (sp != stack_start)
9816 if (cfg->method != method)
9818 * Inlining this into a loop in a parent could lead to
9819 * stack overflows which is different behavior than the
9820 * non-inlined case, thus disable inlining in this case.
9822 goto inline_failure;
9824 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9825 ins->dreg = alloc_preg (cfg);
9826 ins->sreg1 = sp [0]->dreg;
9827 ins->type = STACK_PTR;
9828 MONO_ADD_INS (cfg->cbb, ins);
9830 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9832 ins->flags |= MONO_INST_INIT;
9837 case CEE_ENDFILTER: {
9838 MonoExceptionClause *clause, *nearest;
9839 int cc, nearest_num;
9843 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9845 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9846 ins->sreg1 = (*sp)->dreg;
9847 MONO_ADD_INS (bblock, ins);
9848 start_new_bblock = 1;
9853 for (cc = 0; cc < header->num_clauses; ++cc) {
9854 clause = &header->clauses [cc];
9855 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9856 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9857 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9863 if ((ip - header->code) != nearest->handler_offset)
9868 case CEE_UNALIGNED_:
9869 ins_flag |= MONO_INST_UNALIGNED;
9870 /* FIXME: record alignment? we can assume 1 for now */
9875 ins_flag |= MONO_INST_VOLATILE;
9879 ins_flag |= MONO_INST_TAILCALL;
9880 cfg->flags |= MONO_CFG_HAS_TAIL;
9881 /* Can't inline tail calls at this time */
9882 inline_costs += 100000;
9889 token = read32 (ip + 2);
9890 klass = mini_get_class (method, token, generic_context);
9891 CHECK_TYPELOAD (klass);
9892 if (generic_class_is_reference_type (cfg, klass))
9893 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9895 mini_emit_initobj (cfg, *sp, NULL, klass);
9899 case CEE_CONSTRAINED_:
9901 token = read32 (ip + 2);
9902 if (method->wrapper_type != MONO_WRAPPER_NONE)
9903 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9905 constrained_call = mono_class_get_full (image, token, generic_context);
9906 CHECK_TYPELOAD (constrained_call);
9911 MonoInst *iargs [3];
9915 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9916 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9917 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9918 /* emit_memset only works when val == 0 */
9919 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9924 if (ip [1] == CEE_CPBLK) {
9925 MonoMethod *memcpy_method = get_memcpy_method ();
9926 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9928 MonoMethod *memset_method = get_memset_method ();
9929 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9939 ins_flag |= MONO_INST_NOTYPECHECK;
9941 ins_flag |= MONO_INST_NORANGECHECK;
9942 /* we ignore the no-nullcheck for now since we
9943 * really do it explicitly only when doing callvirt->call
9949 int handler_offset = -1;
9951 for (i = 0; i < header->num_clauses; ++i) {
9952 MonoExceptionClause *clause = &header->clauses [i];
9953 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9954 handler_offset = clause->handler_offset;
9959 bblock->flags |= BB_EXCEPTION_UNSAFE;
9961 g_assert (handler_offset != -1);
9963 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9964 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9965 ins->sreg1 = load->dreg;
9966 MONO_ADD_INS (bblock, ins);
9968 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9969 MONO_ADD_INS (bblock, ins);
9972 link_bblock (cfg, bblock, end_bblock);
9973 start_new_bblock = 1;
9981 CHECK_STACK_OVF (1);
9983 token = read32 (ip + 2);
9984 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9985 MonoType *type = mono_type_create_from_typespec (image, token);
9986 token = mono_type_size (type, &ialign);
9988 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9989 CHECK_TYPELOAD (klass);
9990 mono_class_init (klass);
9991 token = mono_class_value_size (klass, &align);
9993 EMIT_NEW_ICONST (cfg, ins, token);
9998 case CEE_REFANYTYPE: {
9999 MonoInst *src_var, *src;
10005 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10007 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10008 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10009 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10014 case CEE_READONLY_:
10027 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10037 g_warning ("opcode 0x%02x not handled", *ip);
10041 if (start_new_bblock != 1)
10044 bblock->cil_length = ip - bblock->cil_code;
10045 bblock->next_bb = end_bblock;
10047 if (cfg->method == method && cfg->domainvar) {
10049 MonoInst *get_domain;
10051 cfg->cbb = init_localsbb;
10053 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10054 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10057 get_domain->dreg = alloc_preg (cfg);
10058 MONO_ADD_INS (cfg->cbb, get_domain);
10060 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10061 MONO_ADD_INS (cfg->cbb, store);
10064 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10065 if (cfg->compile_aot)
10066 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10067 mono_get_got_var (cfg);
10070 if (cfg->method == method && cfg->got_var)
10071 mono_emit_load_got_addr (cfg);
10076 cfg->cbb = init_localsbb;
10078 for (i = 0; i < header->num_locals; ++i) {
10079 MonoType *ptype = header->locals [i];
10080 int t = ptype->type;
10081 dreg = cfg->locals [i]->dreg;
10083 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10084 t = mono_class_enum_basetype (ptype->data.klass)->type;
10085 if (ptype->byref) {
10086 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10087 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10088 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10089 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10090 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10091 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10092 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10093 ins->type = STACK_R8;
10094 ins->inst_p0 = (void*)&r8_0;
10095 ins->dreg = alloc_dreg (cfg, STACK_R8);
10096 MONO_ADD_INS (init_localsbb, ins);
10097 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10098 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10099 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10100 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10102 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10107 if (cfg->init_ref_vars && cfg->method == method) {
10108 /* Emit initialization for ref vars */
10109 // FIXME: Avoid duplication initialization for IL locals.
10110 for (i = 0; i < cfg->num_varinfo; ++i) {
10111 MonoInst *ins = cfg->varinfo [i];
10113 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10114 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10118 /* Add a sequence point for method entry/exit events */
10120 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10121 MONO_ADD_INS (init_localsbb, ins);
10122 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10123 MONO_ADD_INS (cfg->bb_exit, ins);
10128 if (cfg->method == method) {
10129 MonoBasicBlock *bb;
10130 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10131 bb->region = mono_find_block_region (cfg, bb->real_offset);
10133 mono_create_spvar_for_region (cfg, bb->region);
10134 if (cfg->verbose_level > 2)
10135 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10139 g_slist_free (class_inits);
10140 dont_inline = g_list_remove (dont_inline, method);
10142 if (inline_costs < 0) {
10145 /* Method is too large */
10146 mname = mono_method_full_name (method, TRUE);
10147 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10148 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10150 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10151 mono_basic_block_free (original_bb);
10155 if ((cfg->verbose_level > 2) && (cfg->method == method))
10156 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10158 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10159 mono_basic_block_free (original_bb);
10160 return inline_costs;
10163 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10170 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10174 set_exception_type_from_invalid_il (cfg, method, ip);
10178 g_slist_free (class_inits);
10179 mono_basic_block_free (original_bb);
10180 dont_inline = g_list_remove (dont_inline, method);
10181 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-register-to-memory opcode to its store-immediate
 * counterpart of the same width (…_MEMBASE_REG -> …_MEMBASE_IMM).
 * Aborts if OPCODE has no immediate form.
 * NOTE(review): this chunk is elided; the switch header and closing
 * braces are not visible here.
 */
10186 store_membase_reg_to_store_membase_imm (int opcode)
10189 case OP_STORE_MEMBASE_REG:
10190 return OP_STORE_MEMBASE_IMM;
10191 case OP_STOREI1_MEMBASE_REG:
10192 return OP_STOREI1_MEMBASE_IMM;
10193 case OP_STOREI2_MEMBASE_REG:
10194 return OP_STOREI2_MEMBASE_IMM;
10195 case OP_STOREI4_MEMBASE_REG:
10196 return OP_STOREI4_MEMBASE_IMM;
10197 case OP_STOREI8_MEMBASE_REG:
10198 return OP_STOREI8_MEMBASE_IMM;
/* no immediate variant exists for this store opcode */
10200 g_assert_not_reached ();
10206 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. an integer add
 * becomes OP_IADD_IMM), used by the local passes to fold a known-constant
 * source operand directly into the instruction.
 * NOTE(review): the `case` labels of this switch are elided in this chunk;
 * only the `return` lines are visible, grouped by operand width.
 */
10209 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
10213 return OP_IADD_IMM;
10215 return OP_ISUB_IMM;
10217 return OP_IDIV_IMM;
10219 return OP_IDIV_UN_IMM;
10221 return OP_IREM_IMM;
10223 return OP_IREM_UN_IMM;
10225 return OP_IMUL_IMM;
10227 return OP_IAND_IMM;
10231 return OP_IXOR_IMM;
10233 return OP_ISHL_IMM;
10235 return OP_ISHR_IMM;
10237 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU ops */
10240 return OP_LADD_IMM;
10242 return OP_LSUB_IMM;
10244 return OP_LAND_IMM;
10248 return OP_LXOR_IMM;
10250 return OP_LSHL_IMM;
10252 return OP_LSHR_IMM;
10254 return OP_LSHR_UN_IMM;
/* comparisons */
10257 return OP_COMPARE_IMM;
10259 return OP_ICOMPARE_IMM;
10261 return OP_LCOMPARE_IMM;
/* stores: same mapping as store_membase_reg_to_store_membase_imm () */
10263 case OP_STORE_MEMBASE_REG:
10264 return OP_STORE_MEMBASE_IMM;
10265 case OP_STOREI1_MEMBASE_REG:
10266 return OP_STOREI1_MEMBASE_IMM;
10267 case OP_STOREI2_MEMBASE_REG:
10268 return OP_STOREI2_MEMBASE_IMM;
10269 case OP_STOREI4_MEMBASE_REG:
10270 return OP_STOREI4_MEMBASE_IMM;
/* arch specific opcodes */
10272 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10274 return OP_X86_PUSH_IMM;
10275 case OP_X86_COMPARE_MEMBASE_REG:
10276 return OP_X86_COMPARE_MEMBASE_IMM;
10278 #if defined(TARGET_AMD64)
10279 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10280 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* a call through a constant address needs no register operand */
10282 case OP_VOIDCALL_REG:
10283 return OP_VOIDCALL;
10291 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * machine-IR load-from-base-register opcode (OP_LOAD*_MEMBASE).
 * Aborts on an unexpected opcode.
 * NOTE(review): most `case` labels are elided in this chunk.
 */
10298 ldind_to_load_membase (int opcode)
10302 return OP_LOADI1_MEMBASE;
10304 return OP_LOADU1_MEMBASE;
10306 return OP_LOADI2_MEMBASE;
10308 return OP_LOADU2_MEMBASE;
10310 return OP_LOADI4_MEMBASE;
10312 return OP_LOADU4_MEMBASE;
10314 return OP_LOAD_MEMBASE;
/* object references load as a native-word sized value */
10315 case CEE_LDIND_REF:
10316 return OP_LOAD_MEMBASE;
10318 return OP_LOADI8_MEMBASE;
10320 return OP_LOADR4_MEMBASE;
10322 return OP_LOADR8_MEMBASE;
10324 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * machine-IR store-to-base-register opcode (OP_STORE*_MEMBASE_REG).
 * Aborts on an unexpected opcode.
 * NOTE(review): most `case` labels are elided in this chunk.
 */
10331 stind_to_store_membase (int opcode)
10335 return OP_STOREI1_MEMBASE_REG;
10337 return OP_STOREI2_MEMBASE_REG;
10339 return OP_STOREI4_MEMBASE_REG;
/* object references store as a native-word sized value */
10341 case CEE_STIND_REF:
10342 return OP_STORE_MEMBASE_REG;
10344 return OP_STOREI8_MEMBASE_REG;
10346 return OP_STORER4_MEMBASE_REG;
10348 return OP_STORER8_MEMBASE_REG;
10350 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * variant (OP_LOAD*_MEM), available only on x86/amd64; the 64 bit load
 * additionally requires a 64 bit register size.
 * NOTE(review): the fall-through/default path returning a sentinel for
 * unsupported opcodes is elided in this chunk.
 */
10357 mono_load_membase_to_load_mem (int opcode)
10359 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10360 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10362 case OP_LOAD_MEMBASE:
10363 return OP_LOAD_MEM;
10364 case OP_LOADU1_MEMBASE:
10365 return OP_LOADU1_MEM;
10366 case OP_LOADU2_MEMBASE:
10367 return OP_LOADU2_MEM;
10368 case OP_LOADI4_MEMBASE:
10369 return OP_LOADI4_MEM;
10370 case OP_LOADU4_MEMBASE:
10371 return OP_LOADU4_MEM;
10372 #if SIZEOF_REGISTER == 8
10373 case OP_LOADI8_MEMBASE:
10374 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored with
 * STORE_OPCODE, return the fused x86/amd64 read-modify-write opcode that
 * operates directly on the memory destination (e.g. add reg -> mem).
 * Only word-sized stores are fused; other store widths bail out early.
 * NOTE(review): the `case` labels and the fallback return for non-fusable
 * opcodes are elided in this chunk.
 */
10383 op_to_op_dest_membase (int store_opcode, int opcode)
10385 #if defined(TARGET_X86)
/* on x86 only 32 bit (= word sized) stores can be fused */
10386 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source RMW forms */
10391 return OP_X86_ADD_MEMBASE_REG;
10393 return OP_X86_SUB_MEMBASE_REG;
10395 return OP_X86_AND_MEMBASE_REG;
10397 return OP_X86_OR_MEMBASE_REG;
10399 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source RMW forms */
10402 return OP_X86_ADD_MEMBASE_IMM;
10405 return OP_X86_SUB_MEMBASE_IMM;
10408 return OP_X86_AND_MEMBASE_IMM;
10411 return OP_X86_OR_MEMBASE_IMM;
10414 return OP_X86_XOR_MEMBASE_IMM;
10420 #if defined(TARGET_AMD64)
/* on amd64, 32 and 64 bit stores can both be fused */
10421 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the x86 opcodes */
10426 return OP_X86_ADD_MEMBASE_REG;
10428 return OP_X86_SUB_MEMBASE_REG;
10430 return OP_X86_AND_MEMBASE_REG;
10432 return OP_X86_OR_MEMBASE_REG;
10434 return OP_X86_XOR_MEMBASE_REG;
10436 return OP_X86_ADD_MEMBASE_IMM;
10438 return OP_X86_SUB_MEMBASE_IMM;
10440 return OP_X86_AND_MEMBASE_IMM;
10442 return OP_X86_OR_MEMBASE_IMM;
10444 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use the AMD64-specific opcodes */
10446 return OP_AMD64_ADD_MEMBASE_REG;
10448 return OP_AMD64_SUB_MEMBASE_REG;
10450 return OP_AMD64_AND_MEMBASE_REG;
10452 return OP_AMD64_OR_MEMBASE_REG;
10454 return OP_AMD64_XOR_MEMBASE_REG;
10457 return OP_AMD64_ADD_MEMBASE_IMM;
10460 return OP_AMD64_SUB_MEMBASE_IMM;
10463 return OP_AMD64_AND_MEMBASE_IMM;
10466 return OP_AMD64_OR_MEMBASE_IMM;
10469 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store into a
 * single x86/amd64 SETcc-to-memory opcode.  Only byte-sized stores
 * (OP_STOREI1_MEMBASE_REG) can be fused, since SETcc writes one byte.
 * NOTE(review): the `case` labels selecting the condition (EQ/NE) and
 * the fallback return are elided in this chunk.
 */
10479 op_to_op_store_membase (int store_opcode, int opcode)
10481 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10484 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10485 return OP_X86_SETEQ_MEMBASE;
10487 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10488 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the FIRST source operand of OPCODE
 * into a single x86/amd64 opcode that reads that operand directly from
 * memory (e.g. cmp [mem], reg / push [mem]).  Only word-sized loads are
 * eligible; others fall through to the non-fused path.
 * NOTE(review): several `case` labels and fallback returns are elided in
 * this chunk.
 */
10496 op_to_op_src1_membase (int load_opcode, int opcode)
10499 /* FIXME: This has sign extension issues */
10501 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10502 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only word sized loads can be fused on x86 */
10505 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10510 return OP_X86_PUSH_MEMBASE;
10511 case OP_COMPARE_IMM:
10512 case OP_ICOMPARE_IMM:
10513 return OP_X86_COMPARE_MEMBASE_IMM;
10516 return OP_X86_COMPARE_MEMBASE_REG;
10520 #ifdef TARGET_AMD64
10521 /* FIXME: This has sign extension issues */
10523 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10524 return OP_X86_COMPARE_MEMBASE8_IMM;
10529 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10530 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare fusion below is commented out upstream */
10532 /* FIXME: This only works for 32 bit immediates
10533 case OP_COMPARE_IMM:
10534 case OP_LCOMPARE_IMM:
10535 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10536 return OP_AMD64_COMPARE_MEMBASE_IMM;
10538 case OP_ICOMPARE_IMM:
10539 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10540 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick the 32 vs 64 bit form from the load width */
10544 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10545 return OP_AMD64_COMPARE_MEMBASE_REG;
10548 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10549 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the SECOND source operand of OPCODE
 * into a single x86/amd64 reg-op-memory opcode (e.g. add reg, [mem]).
 * The load width selects between the 32 bit (x86-named) and 64 bit
 * (AMD64-named) opcode families.
 * NOTE(review): the `case` labels and fallback returns are elided in
 * this chunk.
 */
10558 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only word sized loads can be fused */
10561 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10567 return OP_X86_COMPARE_REG_MEMBASE;
10569 return OP_X86_ADD_REG_MEMBASE;
10571 return OP_X86_SUB_REG_MEMBASE;
10573 return OP_X86_AND_REG_MEMBASE;
10575 return OP_X86_OR_REG_MEMBASE;
10577 return OP_X86_XOR_REG_MEMBASE;
10581 #ifdef TARGET_AMD64
/* 32 bit loads: reuse the x86 reg-membase opcodes */
10582 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10585 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10587 return OP_X86_ADD_REG_MEMBASE;
10589 return OP_X86_SUB_REG_MEMBASE;
10591 return OP_X86_AND_REG_MEMBASE;
10593 return OP_X86_OR_REG_MEMBASE;
10595 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads: use the AMD64-specific opcodes */
10597 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10601 return OP_AMD64_COMPARE_REG_MEMBASE;
10603 return OP_AMD64_ADD_REG_MEMBASE;
10605 return OP_AMD64_SUB_REG_MEMBASE;
10607 return OP_AMD64_AND_REG_MEMBASE;
10609 return OP_AMD64_OR_REG_MEMBASE;
10611 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes that
 * are emulated in software on this configuration (long shifts on 32 bit
 * registers, mul/div where the arch emulates them) — folding an immediate
 * into an emulated opcode would break the emulation call.
 * NOTE(review): the `case` labels and the rejection return are elided in
 * this chunk; only the guards and the delegating return are visible.
 */
10620 mono_op_to_op_imm_noemul (int opcode)
10623 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10629 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10637 return mono_op_to_op_imm (opcode);
10641 #ifndef DISABLE_JIT
10644 * mono_handle_global_vregs:
10646 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as visible in this elided chunk):
 *   1) scan every instruction in every bblock, recording for each vreg the
 *      bblock it is used in (vreg_to_bb); a vreg seen in two different
 *      bblocks is promoted to a MonoInst variable ("made global");
 *   2) demote variables used in only one bblock back to local vregs,
 *      unless a call inside their liveness interval would force a spill;
 *   3) compact cfg->varinfo/cfg->vars to drop dead entries.
 */
10650 mono_handle_global_vregs (MonoCompile *cfg)
10652 gint32 *vreg_to_bb;
10653 MonoBasicBlock *bb;
/*
 * NOTE(review): the allocation below sizes the array with
 * sizeof (gint32*) although vreg_to_bb holds gint32 entries, and the
 * "+ 1" is outside the multiplication — presumably it was meant to be
 * sizeof (gint32) * (cfg->next_vreg + 1).  Over-allocates (harmless) on
 * 64 bit, but worth confirming upstream.
 */
10656 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10658 #ifdef MONO_ARCH_SIMD_INTRINSICS
10659 if (cfg->uses_simd_intrinsics)
10660 mono_simd_simplify_indirection (cfg);
10663 /* Find local vregs used in more than one bb */
10664 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10665 MonoInst *ins = bb->code;
10666 int block_num = bb->block_num;
10668 if (cfg->verbose_level > 2)
10669 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10672 for (; ins; ins = ins->next) {
10673 const char *spec = INS_INFO (ins->opcode);
10674 int regtype = 0, regindex;
10677 if (G_UNLIKELY (cfg->verbose_level > 2))
10678 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
10680 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn; blank spec char = unused slot */
10682 for (regindex = 0; regindex < 4; regindex ++) {
10685 if (regindex == 0) {
10686 regtype = spec [MONO_INST_DEST];
10687 if (regtype == ' ')
10690 } else if (regindex == 1) {
10691 regtype = spec [MONO_INST_SRC1];
10692 if (regtype == ' ')
10695 } else if (regindex == 2) {
10696 regtype = spec [MONO_INST_SRC2];
10697 if (regtype == ' ')
10700 } else if (regindex == 3) {
10701 regtype = spec [MONO_INST_SRC3];
10702 if (regtype == ' ')
10707 #if SIZEOF_REGISTER == 4
10708 /* In the LLVM case, the long opcodes are not decomposed */
10709 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10711 * Since some instructions reference the original long vreg,
10712 * and some reference the two component vregs, it is quite hard
10713 * to determine when it needs to be global. So be conservative.
10715 if (!get_vreg_to_inst (cfg, vreg)) {
10716 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10718 if (cfg->verbose_level > 2)
10719 printf ("LONG VREG R%d made global.\n", vreg);
10723 * Make the component vregs volatile since the optimizations can
10724 * get confused otherwise.
10726 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10727 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10731 g_assert (vreg != -1);
10733 prev_bb = vreg_to_bb [vreg];
10734 if (prev_bb == 0) {
10735 /* 0 is a valid block num */
10736 vreg_to_bb [vreg] = block_num + 1;
10737 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never promoted to variables */
10738 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10741 if (!get_vreg_to_inst (cfg, vreg)) {
10742 if (G_UNLIKELY (cfg->verbose_level > 2))
10743 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* variable type is picked from the regtype spec char (i/l/f/vtype) */
10747 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10750 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10753 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10756 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10759 g_assert_not_reached ();
10763 /* Flag as having been used in more than one bb */
10764 vreg_to_bb [vreg] = -1;
10770 /* If a variable is used in only one bblock, convert it into a local vreg */
10771 for (i = 0; i < cfg->num_varinfo; i++) {
10772 MonoInst *var = cfg->varinfo [i];
10773 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10775 switch (var->type) {
10781 #if SIZEOF_REGISTER == 8
10784 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10785 /* Enabling this screws up the fp stack on x86 */
10788 /* Arguments are implicitly global */
10789 /* Putting R4 vars into registers doesn't work currently */
10790 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10792 * Make that the variable's liveness interval doesn't contain a call, since
10793 * that would cause the lvreg to be spilled, making the whole optimization
10796 /* This is too slow for JIT compilation */
10798 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10800 int def_index, call_index, ins_index;
10801 gboolean spilled = FALSE;
10806 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10807 const char *spec = INS_INFO (ins->opcode);
10809 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10810 def_index = ins_index;
/*
 * NOTE(review): the second disjunct below duplicates the first
 * (SRC1/sreg1 checked twice) — it almost certainly was meant to
 * test spec [MONO_INST_SRC2] / ins->sreg2, so uses of the var as a
 * second source operand are currently missed here.  Confirm against
 * upstream before changing.
 */
10812 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10813 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10814 if (call_index > def_index) {
10820 if (MONO_IS_CALL (ins))
10821 call_index = ins_index;
10831 if (G_UNLIKELY (cfg->verbose_level > 2))
10832 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10833 var->flags |= MONO_INST_IS_DEAD;
10834 cfg->vreg_to_inst [var->dreg] = NULL;
10841 * Compress the varinfo and vars tables so the liveness computation is faster and
10842 * takes up less space.
10845 for (i = 0; i < cfg->num_varinfo; ++i) {
10846 MonoInst *var = cfg->varinfo [i];
10847 if (pos < i && cfg->locals_start == i)
10848 cfg->locals_start = pos;
10849 if (!(var->flags & MONO_INST_IS_DEAD)) {
10851 cfg->varinfo [pos] = cfg->varinfo [i];
10852 cfg->varinfo [pos]->inst_c0 = pos;
10853 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10854 cfg->vars [pos].idx = pos;
10855 #if SIZEOF_REGISTER == 4
10856 if (cfg->varinfo [pos]->type == STACK_I8) {
10857 /* Modify the two component vars too */
10860 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10861 var1->inst_c0 = pos;
10862 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10863 var1->inst_c0 = pos;
10870 cfg->num_varinfo = pos;
10871 if (cfg->locals_start > cfg->num_varinfo)
10872 cfg->locals_start = cfg->num_varinfo;
10876 * mono_spill_global_vars:
10878 * Generate spill code for variables which are not allocated to registers,
10879 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10880 * code is generated which could be optimized by the local optimization passes.
10883 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10885 MonoBasicBlock *bb;
10887 int orig_next_vreg;
/* vreg_to_lvreg [vreg] caches the lvreg currently holding vreg's value (0 = none) */
10888 guint32 *vreg_to_lvreg;
10890 guint32 i, lvregs_len;
10891 gboolean dest_has_lvreg = FALSE;
/* Maps an opcode spec character ('i'/'l'/'f'/...) to the stack type used when
 * allocating a fresh lvreg for an operand of that kind */
10892 guint32 stacktypes [128];
/* Per-vreg first/last defining-or-using instruction and its bblock; used to
 * emit instruction-precise live range markers for debug info further below */
10893 MonoInst **live_range_start, **live_range_end;
10894 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10896 *need_local_opts = FALSE;
10898 memset (spec2, 0, sizeof (spec2));
10900 /* FIXME: Move this function to mini.c */
10901 stacktypes ['i'] = STACK_PTR;
10902 stacktypes ['l'] = STACK_I8;
10903 stacktypes ['f'] = STACK_R8;
10904 #ifdef MONO_ARCH_SIMD_INTRINSICS
10905 stacktypes ['x'] = STACK_VTYPE;
10908 #if SIZEOF_REGISTER == 4
10909 /* Create MonoInsts for longs */
/* On 32 bit targets a 64 bit variable is represented by two word-sized
 * component vregs (dreg + 1 = least significant word, dreg + 2 = most
 * significant word); give each component a REGOFFSET aliasing the
 * corresponding half of the variable's stack slot. */
10910 for (i = 0; i < cfg->num_varinfo; i++) {
10911 MonoInst *ins = cfg->varinfo [i];
10913 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10914 switch (ins->type) {
10919 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10922 g_assert (ins->opcode == OP_REGOFFSET);
10924 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10926 tree->opcode = OP_REGOFFSET;
10927 tree->inst_basereg = ins->inst_basereg;
10928 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10930 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10932 tree->opcode = OP_REGOFFSET;
10933 tree->inst_basereg = ins->inst_basereg;
10934 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10944 /* FIXME: widening and truncation */
10947 * As an optimization, when a variable allocated to the stack is first loaded into
10948 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10949 * the variable again.
/* Snapshot the vreg count so vregs allocated below (lvregs) can be told
 * apart from the original global vregs (see the < orig_next_vreg checks) */
10951 orig_next_vreg = cfg->next_vreg;
10952 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg, so the cache can be
 * cleared without scanning the whole vreg_to_lvreg array (1024 max entries,
 * guarded by g_assert below) */
10953 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10957 * These arrays contain the first and last instructions accessing a given
10959 * Since we emit bblocks in the same order we process them here, and we
10960 * don't split live ranges, these will precisely describe the live range of
10961 * the variable, i.e. the instruction range where a valid value can be found
10962 * in the variable's location.
10963 * The live range is computed using the liveness info computed by the liveness pass.
10964 * We can't use vmv->range, since that is an abstract live range, and we need
10965 * one which is instruction precise.
10966 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10968 /* FIXME: Only do this if debugging info is requested */
10969 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10970 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10971 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10972 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10974 /* Add spill loads/stores */
10975 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10978 if (cfg->verbose_level > 2)
10979 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock, so reset the cache here */
10981 /* Clear vreg_to_lvreg array */
10982 for (i = 0; i < lvregs_len; i++)
10983 vreg_to_lvreg [lvregs [i]] = 0;
10987 MONO_BB_FOR_EACH_INS (bb, ins) {
10988 const char *spec = INS_INFO (ins->opcode);
10989 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10990 gboolean store, no_lvreg;
10991 int sregs [MONO_MAX_SRC_REGS];
10993 if (G_UNLIKELY (cfg->verbose_level > 2))
10994 mono_print_ins (ins);
10996 if (ins->opcode == OP_NOP)
11000 * We handle LDADDR here as well, since it can only be decomposed
11001 * when variable addresses are known.
11003 if (ins->opcode == OP_LDADDR) {
11004 MonoInst *var = ins->inst_p0;
11006 if (var->opcode == OP_VTARG_ADDR) {
11007 /* Happens on SPARC/S390 where vtypes are passed by reference */
11008 MonoInst *vtaddr = var->inst_left;
11009 if (vtaddr->opcode == OP_REGVAR) {
/* The address is already in a hardware register: a simple move suffices */
11010 ins->opcode = OP_MOVE;
11011 ins->sreg1 = vtaddr->dreg;
11013 else if (var->inst_left->opcode == OP_REGOFFSET) {
/* The address lives in a stack slot: load it from there */
11014 ins->opcode = OP_LOAD_MEMBASE;
11015 ins->inst_basereg = vtaddr->inst_basereg;
11016 ins->inst_offset = vtaddr->inst_offset;
11020 g_assert (var->opcode == OP_REGOFFSET);
/* Compute the slot address explicitly: basereg + offset */
11022 ins->opcode = OP_ADD_IMM;
11023 ins->sreg1 = var->inst_basereg;
11024 ins->inst_imm = var->inst_offset;
/* The rewritten LDADDR sequences create opportunities for local opts */
11027 *need_local_opts = TRUE;
11028 spec = INS_INFO (ins->opcode);
/* High level (CIL-level) opcodes must have been lowered away by this point */
11031 if (ins->opcode < MONO_CEE_LAST) {
11032 mono_print_ins (ins);
11033 g_assert_not_reached ();
11037 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg and sreg2 so dreg really names the stored value;
 * spec2 is adjusted to match. NOTE(review): the swap is undone further
 * down (see the matching tmp_reg swap after the sreg processing). */
11041 if (MONO_IS_STORE_MEMBASE (ins)) {
11042 tmp_reg = ins->dreg;
11043 ins->dreg = ins->sreg2;
11044 ins->sreg2 = tmp_reg;
11047 spec2 [MONO_INST_DEST] = ' ';
11048 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11049 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11050 spec2 [MONO_INST_SRC3] = ' ';
11052 } else if (MONO_IS_STORE_MEMINDEX (ins))
11053 g_assert_not_reached ();
11058 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11059 printf ("\t %.3s %d", spec, ins->dreg);
11060 num_sregs = mono_inst_get_src_registers (ins, sregs);
11061 for (srcindex = 0; srcindex < 3; ++srcindex)
11062 printf (" %d", sregs [srcindex]);
/* ---- Destination register ---- */
11069 regtype = spec [MONO_INST_DEST];
11070 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* The dreg names a global variable: either rename it to the variable's
 * hreg, or redirect the result and spill it to the variable's stack slot */
11073 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11074 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11075 MonoInst *store_ins;
11077 MonoInst *def_ins = ins;
11078 int dreg = ins->dreg; /* The original vreg */
11080 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11082 if (var->opcode == OP_REGVAR) {
/* Variable allocated to a hardware register: just rename the dreg */
11083 ins->dreg = var->dreg;
11084 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11086 * Instead of emitting a load+store, use a _membase opcode.
11088 g_assert (var->opcode == OP_REGOFFSET);
11089 if (ins->opcode == OP_MOVE) {
11093 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11094 ins->inst_basereg = var->inst_basereg;
11095 ins->inst_offset = var->inst_offset;
11098 spec = INS_INFO (ins->opcode);
11102 g_assert (var->opcode == OP_REGOFFSET);
11104 prev_dreg = ins->dreg;
11106 /* Invalidate any previous lvreg for this vreg */
11107 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float, R8 values are spilled as their I8 bit pattern */
11111 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11113 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg; a store back into the
 * variable's stack slot is emitted right after the instruction */
11116 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11118 if (regtype == 'l') {
/* 64 bit value on a 32 bit target: store the two word halves separately */
11119 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11120 mono_bblock_insert_after_ins (bb, ins, store_ins);
11121 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11122 mono_bblock_insert_after_ins (bb, ins, store_ins);
11123 def_ins = store_ins;
11126 g_assert (store_opcode != OP_STOREV_MEMBASE);
11128 /* Try to fuse the store into the instruction itself */
11129 /* FIXME: Add more instructions */
11130 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant result: turn the instruction into a store-immediate */
11131 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11132 ins->inst_imm = ins->inst_c0;
11133 ins->inst_destbasereg = var->inst_basereg;
11134 ins->inst_offset = var->inst_offset;
11135 spec = INS_INFO (ins->opcode);
11136 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Plain register move: turn it directly into the spill store */
11137 ins->opcode = store_opcode;
11138 ins->inst_destbasereg = var->inst_basereg;
11139 ins->inst_offset = var->inst_offset;
/* Re-apply the store-opcode dreg/sreg2 convention for the fused store */
11143 tmp_reg = ins->dreg;
11144 ins->dreg = ins->sreg2;
11145 ins->sreg2 = tmp_reg;
11148 spec2 [MONO_INST_DEST] = ' ';
11149 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11150 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11151 spec2 [MONO_INST_SRC3] = ' ';
11153 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11154 // FIXME: The backends expect the base reg to be in inst_basereg
11155 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11157 ins->inst_basereg = var->inst_basereg;
11158 ins->inst_offset = var->inst_offset;
11159 spec = INS_INFO (ins->opcode);
11161 /* printf ("INS: "); mono_print_ins (ins); */
11162 /* Create a store instruction */
11163 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11165 /* Insert it after the instruction */
11166 mono_bblock_insert_after_ins (bb, ins, store_ins);
11168 def_ins = store_ins;
11171 * We can't assign ins->dreg to var->dreg here, since the
11172 * sregs could use it. So set a flag, and do it after
/* fp-stack values and volatile/indirect variables must never be cached
 * in an lvreg, since the slot can change behind our back */
11175 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11176 dest_has_lvreg = TRUE;
/* Record the first definition of the original vreg for the live range info */
11181 if (def_ins && !live_range_start [dreg]) {
11182 live_range_start [dreg] = def_ins;
11183 live_range_start_bb [dreg] = bb;
/* ---- Source registers ---- */
11190 num_sregs = mono_inst_get_src_registers (ins, sregs);
11191 for (srcindex = 0; srcindex < 3; ++srcindex) {
11192 regtype = spec [MONO_INST_SRC1 + srcindex];
11193 sreg = sregs [srcindex];
11195 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11196 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11197 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11198 MonoInst *use_ins = ins;
11199 MonoInst *load_ins;
11200 guint32 load_opcode;
11202 if (var->opcode == OP_REGVAR) {
/* Variable allocated to a hardware register: rename and record the use */
11203 sregs [srcindex] = var->dreg;
11204 //mono_inst_set_src_registers (ins, sregs);
11205 live_range_end [sreg] = use_ins;
11206 live_range_end_bb [sreg] = bb;
11210 g_assert (var->opcode == OP_REGOFFSET);
11212 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11214 g_assert (load_opcode != OP_LOADV_MEMBASE);
11216 if (vreg_to_lvreg [sreg]) {
11217 g_assert (vreg_to_lvreg [sreg] != -1);
11219 /* The variable is already loaded to an lvreg */
11220 if (G_UNLIKELY (cfg->verbose_level > 2))
11221 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11222 sregs [srcindex] = vreg_to_lvreg [sreg];
11223 //mono_inst_set_src_registers (ins, sregs);
11227 /* Try to fuse the load into the instruction */
11228 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11229 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11230 sregs [0] = var->inst_basereg;
11231 //mono_inst_set_src_registers (ins, sregs);
11232 ins->inst_offset = var->inst_offset;
11233 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11234 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11235 sregs [1] = var->inst_basereg;
11236 //mono_inst_set_src_registers (ins, sregs);
11237 ins->inst_offset = var->inst_offset;
11239 if (MONO_IS_REAL_MOVE (ins)) {
/* A move whose source is loaded separately below becomes redundant */
11240 ins->opcode = OP_NOP;
11243 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and rewrite the operand to use it */
11245 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the lvreg so later uses inside this bblock skip the reload
 * (subject to the same fp-stack / volatile / indirect restrictions
 * as on the dreg side) */
11247 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11248 if (var->dreg == prev_dreg) {
11250 * sreg refers to the value loaded by the load
11251 * emitted below, but we need to use ins->dreg
11252 * since it refers to the store emitted earlier.
11256 g_assert (sreg != -1);
11257 vreg_to_lvreg [var->dreg] = sreg;
11258 g_assert (lvregs_len < 1024);
11259 lvregs [lvregs_len ++] = var->dreg;
11263 sregs [srcindex] = sreg;
11264 //mono_inst_set_src_registers (ins, sregs);
11266 if (regtype == 'l') {
/* 64 bit value on a 32 bit target: load the two word halves separately */
11267 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11268 mono_bblock_insert_before_ins (bb, ins, load_ins);
11269 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11270 mono_bblock_insert_before_ins (bb, ins, load_ins);
11271 use_ins = load_ins;
11274 #if SIZEOF_REGISTER == 4
11275 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11277 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11278 mono_bblock_insert_before_ins (bb, ins, load_ins);
11279 use_ins = load_ins;
/* Only original (pre-pass) vregs have live range slots allocated */
11283 if (var->dreg < orig_next_vreg) {
11284 live_range_end [var->dreg] = use_ins;
11285 live_range_end_bb [var->dreg] = bb;
11289 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs no longer
 * reference the old dreg, the result lvreg can be cached for the var */
11291 if (dest_has_lvreg) {
11292 g_assert (ins->dreg != -1);
11293 vreg_to_lvreg [prev_dreg] = ins->dreg;
11294 g_assert (lvregs_len < 1024);
11295 lvregs [lvregs_len ++] = prev_dreg;
11296 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store opcodes earlier */
11300 tmp_reg = ins->dreg;
11301 ins->dreg = ins->sreg2;
11302 ins->sreg2 = tmp_reg;
/* Calls can clobber registers, so cached lvregs are invalid afterwards */
11305 if (MONO_IS_CALL (ins)) {
11306 /* Clear vreg_to_lvreg array */
11307 for (i = 0; i < lvregs_len; i++)
11308 vreg_to_lvreg [lvregs [i]] = 0;
11310 } else if (ins->opcode == OP_NOP) {
11312 MONO_INST_NULLIFY_SREGS (ins);
11315 if (cfg->verbose_level > 2)
11316 mono_print_ins_index (1, ins);
11319 /* Extend the live range based on the liveness info */
11320 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11321 for (i = 0; i < cfg->num_varinfo; i ++) {
11322 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11324 if (vreg_is_volatile (cfg, vi->vreg))
11325 /* The liveness info is incomplete */
11328 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11329 /* Live from at least the first ins of this bb */
11330 live_range_start [vi->vreg] = bb->code;
11331 live_range_start_bb [vi->vreg] = bb;
11334 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11335 /* Live at least until the last ins of this bb */
11336 live_range_end [vi->vreg] = bb->last_ins;
11337 live_range_end_bb [vi->vreg] = bb;
11343 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11345 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11346 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11348 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11349 for (i = 0; i < cfg->num_varinfo; ++i) {
11350 int vreg = MONO_VARINFO (cfg, i)->vreg;
11353 if (live_range_start [vreg]) {
11354 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11356 ins->inst_c1 = vreg;
11357 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11359 if (live_range_end [vreg]) {
11360 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11362 ins->inst_c1 = vreg;
/* Appending at the bblock end keeps the marker after any branch fixups */
11363 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11364 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11366 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live range scratch arrays are only needed during this pass */
11372 g_free (live_range_start);
11373 g_free (live_range_end);
11374 g_free (live_range_start_bb);
11375 g_free (live_range_end_bb);
11380 * - use 'iadd' instead of 'int_add'
11381 * - handling ovf opcodes: decompose in method_to_ir.
11382 * - unify iregs/fregs
11383 * -> partly done, the missing parts are:
11384 * - a more complete unification would involve unifying the hregs as well, so
11385 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11386 * would no longer map to the machine hregs, so the code generators would need to
11387 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11388 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11389 * fp/non-fp branches speeds it up by about 15%.
11390 * - use sext/zext opcodes instead of shifts
11392 * - get rid of TEMPLOADs if possible and use vregs instead
11393 * - clean up usage of OP_P/OP_ opcodes
11394 * - cleanup usage of DUMMY_USE
11395 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11397 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11398 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11399 * - make sure handle_stack_args () is called before the branch is emitted
11400 * - when the new IR is done, get rid of all unused stuff
11401 * - COMPARE/BEQ as separate instructions or unify them ?
11402 * - keeping them separate allows specialized compare instructions like
11403 * compare_imm, compare_membase
11404 * - most back ends unify fp compare+branch, fp compare+ceq
11405 * - integrate mono_save_args into inline_method
11406 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11407 * - handle long shift opts on 32 bit platforms somehow: they require
11408 * 3 sregs (2 for arg1 and 1 for arg2)
11409 * - make byref a 'normal' type.
11410 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11411 * variable if needed.
11412 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11413 * like inline_method.
11414 * - remove inlining restrictions
11415 * - fix LNEG and enable cfold of INEG
11416 * - generalize x86 optimizations like ldelema as a peephole optimization
11417 * - add store_mem_imm for amd64
11418 * - optimize the loading of the interruption flag in the managed->native wrappers
11419 * - avoid special handling of OP_NOP in passes
11420 * - move code inserting instructions into one function/macro.
11421 * - try a coalescing phase after liveness analysis
11422 * - add float -> vreg conversion + local optimizations on !x86
11423 * - figure out how to handle decomposed branches during optimizations, ie.
11424 * compare+branch, op_jump_table+op_br etc.
11425 * - promote RuntimeXHandles to vregs
11426 * - vtype cleanups:
11427 * - add a NEW_VARLOADA_VREG macro
11428 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11429 * accessing vtype fields.
11430 * - get rid of I8CONST on 64 bit platforms
11431 * - dealing with the increase in code size due to branches created during opcode
11433 * - use extended basic blocks
11434 * - all parts of the JIT
11435 * - handle_global_vregs () && local regalloc
11436 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11437 * - sources of increase in code size:
11440 * - isinst and castclass
11441 * - lvregs not allocated to global registers even if used multiple times
11442 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11444 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11445 * - add all micro optimizations from the old JIT
11446 * - put tree optimizations into the deadce pass
11447 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11448 * specific function.
11449 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11450 * fcompare + branchCC.
11451 * - create a helper function for allocating a stack slot, taking into account
11452 * MONO_CFG_HAS_SPILLUP.
11454 * - merge the ia64 switch changes.
11455 * - optimize mono_regstate2_alloc_int/float.
11456 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11457 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11458 * parts of the tree could be separated by other instructions, killing the tree
11459 * arguments, or stores killing loads etc. Also, should we fold loads into other
11460 * instructions if the result of the load is used multiple times ?
11461 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11462 * - LAST MERGE: 108395.
11463 * - when returning vtypes in registers, generate IR and append it to the end of the
11464 * last bb instead of doing it in the epilog.
11465 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11473 - When to decompose opcodes:
11474 - earlier: this makes some optimizations hard to implement, since the low level IR
11475 no longer contains the necessary information. But it is easier to do.
11476 - later: harder to implement, enables more optimizations.
11477 - Branches inside bblocks:
11478 - created when decomposing complex opcodes.
11479 - branches to another bblock: harmless, but not tracked by the branch
11480 optimizations, so need to branch to a label at the start of the bblock.
11481 - branches to inside the same bblock: very problematic, trips up the local
11482 reg allocator. Can be fixed by splitting the current bblock, but that is a
11483 complex operation, since some local vregs can become global vregs etc.
11484 - Local/global vregs:
11485 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11486 local register allocator.
11487 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11488 structure, created by mono_create_var (). Assigned to hregs or the stack by
11489 the global register allocator.
11490 - When to do optimizations like alu->alu_imm:
11491 - earlier -> saves work later on since the IR will be smaller/simpler
11492 - later -> can work on more instructions
11493 - Handling of valuetypes:
11494 - When a vtype is pushed on the stack, a new temporary is created, an
11495 instruction computing its address (LDADDR) is emitted and pushed on
11496 the stack. Need to optimize cases when the vtype is used immediately as in
11497 argument passing, stloc etc.
11498 - Instead of the to_end stuff in the old JIT, simply call the function handling
11499 the values on the stack before emitting the last instruction of the bb.
11502 #endif /* DISABLE_JIT */