2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * Bail-out helper macros used throughout method_to_ir ().
 * NOTE(review): this extract elides several lines of each macro body
 * (the numbering embedded in each line is non-contiguous), so the
 * closing `} while (0)` parts are not visible here — confirm against
 * the full file before editing.
 */
/* Cost threshold used by the inliner when weighing branches. */
64 #define BRANCH_COST 10
/* Maximum IL size (in bytes) of a method considered for inlining. */
65 #define INLINE_LENGTH_LIMIT 20
/* Record an inlining failure unless we are compiling a wrapper. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Abort IR generation if a pending exception was recorded on the cfg. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException (with a formatted message) and bail out. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Record a FieldAccessException (with a formatted message) and bail out. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Mark generic sharing as failed for this opcode and bail out;
 * logs a diagnostic when verbose_level > 2. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * mini-ops.h so the same opcode list expands to different per-opcode
 * tables (first: dreg/sreg descriptors; second: source-register counts). */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness-range bookkeeping.
 * 0xffff marks "no use seen yet". */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source-register slots from 'regs' into 'ins'. */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer virtual register. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point virtual register. */
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized virtual register. */
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
/* Public wrapper: allocate a destination register suited to the given
 * evaluation-stack type. */
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * Map a MonoType to the move opcode used when copying a value of that
 * type between registers. Enums and generic instances are unwrapped to
 * their underlying/container type before dispatch.
 * NOTE(review): the return statements for most cases are elided from
 * this extract — confirm the per-case results against the full file.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying integral type. */
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get special handling. */
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances dispatch on the open container class's type. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only valid under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print a basic block's predecessor/successor edges
 * (block number and depth-first number) and then every instruction
 * in the block, prefixed with 'msg'.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the current method as unverified: break into the debugger when
 * requested via the debug options, otherwise jump to the 'unverified'
 * label in method_to_ir (). */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block that starts
 * at IL address 'ip', bounds-checking 'ip' against the method body. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh pointer register. */
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, mixed ptr/int32 binops need the 32-bit operand
 * sign-extended to pointer width before use; 32-bit targets need
 * nothing, hence the empty fallback definition below. */
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values from the eval stack (sp [0], sp [1]), emit the binary
 * opcode 'op' (type-specialized via type_from_op), widen operands if
 * needed, and push the (possibly decomposed) result back. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value from the eval stack, emit the unary opcode 'op'
 * (type-specialized via type_from_op), and push the result back. */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-operand conditional branch: an OP_COMPARE of the top two
 * stack values followed by the branch 'ins'. Wires up the true target
 * ('target') and false target (either 'next_block' when supplied, or
 * the fall-through block at 'ip'); flushes any values left on the
 * eval stack before the branch via handle_stack_args (). */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
/* Spill pending eval-stack values before the control transfer. */ \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/* Idempotent: an existing from->to edge is detected below and the
 * out_bb/in_bb arrays are only grown (mempool-allocated) when the
 * edge is new. */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit printed
 * specially when a block has no cil_code). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Check whether the successor edge already exists. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot, copying the old entries. */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Same dance for the predecessor edge on 'to'. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/* Region token layout (from the returns below): (clause_index + 1) << 8
 * combined with a MONO_REGION_* kind and the clause flags. */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Filter blocks: offset lies in [filter_offset, handler_offset). */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Handler bodies: finally / fault / catch, in that order. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise, the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * Collect (as a GList) the exception clauses of kind 'type' whose
 * protected range contains 'ip' but not 'target' — i.e. the clauses
 * that a branch from ip to target would leave. Used to find the
 * finally/fault handlers that must run on such a transfer.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/*
 * Return the stack-pointer save variable for 'region', creating and
 * caching (in cfg->spvars, keyed by region) an int-typed local on
 * first use. The variable is marked MONO_INST_INDIRECT so it stays
 * on the stack.
 */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable cached for
 * the handler at IL 'offset'; NULL if none was created yet. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Return the exception-object variable for the handler at IL 'offset',
 * creating and caching (in cfg->exvars) an object-typed local on first
 * use. Marked MONO_INST_INDIRECT so it is stack-allocated.
 */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (a STACK_* constant) and inst->klass from 'type';
 * enums and generic instances are unwrapped and re-dispatched. */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
/* Enums evaluate as their underlying integral type. */
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
/* Generic instances dispatch on the open container class's type. */
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed by the two operand
 * STACK_* types; STACK_INV marks invalid IL combinations. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of negation, indexed by the operand's stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor etc.). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero codes legal (possibly
 * only partially verifiable) comparisons between the two stack types. */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops, indexed by [value][shift-amount] types. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
/* Each entry is the delta added to the generic CIL opcode to obtain
 * the I/L/P/F-specific IR opcode, indexed by stack type. */
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/* Also type-specializes ins->opcode: the generic CIL opcode is turned
 * into an I/L/P/F-specific IR opcode by adding the per-stack-type
 * delta from the *_op_map tables above. */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Arithmetic binops: validate via bin_num_table, then specialize. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches. */
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt etc.: only fully-verifiable comparisons (table value 1). */
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* neg / not. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to small ints produce an I4. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native-pointer sized; on 32-bit targets an I8
 * source keeps the long conversion, on 64-bit a plain move suffices
 * for same-width sources. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8 and their checked variants. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4/r8. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Checked arithmetic: floats are not allowed (marked invalid). */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: result type follows the load width. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
/* ldind_type: STACK_* result for each CEE_LDIND_* variant (I1..REF). */
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: compatibility of a stack type vs. a signature type
 * (body not visible in this extract). */
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the argument instructions 'args' are compatible with the
 * method signature 'sig' (and 'this'); per-parameter checks below
 * compare each arg's stack type against the declared param type.
 * NOTE(review): several branches/returns are elided from this extract. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
/* Managed pointers require a byref parameter, and vice versa. */
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* Floating-point args must match an R4/R8, non-byref parameter. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the int-typed local on first request. */
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on architectures defining MONO_ARCH_NEED_GOT_VAR and
 * when compiling AOT; lazily creates the int-typed local. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/*
 * Return the variable holding the runtime generic context / vtable for
 * shared methods (requires generic sharing). Lazily created and forced
 * onto the stack via MONO_INST_INDIRECT.
 */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/*
 * Map an instruction's eval-stack type (STACK_*) back to a MonoType*,
 * using ins->klass for managed-pointer and value-type entries.
 * Aborts (g_error) on unhandled stack types.
 */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its STACK_* eval-stack type after stripping enum
 * wrappers via mono_type_get_underlying_type ().
 * NOTE(review): the per-case return values are elided from this
 * extract — confirm against the full file.
 */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: value-type instantiations are vtypes. */
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the
 * element type it accesses. Unreachable for any other opcode.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a local variable to hold an eval-stack value of the given
 * stack 'slot' and type; integer-typed slots are cached per
 * (type, slot) in cfg->intvars so bblock-boundary spills reuse the
 * same variable. Slots beyond max_stack (from inlining) always get a
 * fresh variable. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) combined with the slot index. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * When compiling AOT, remember the (image, token) pair used to resolve
 * 'key' so the AOT compiler can re-resolve it later; stored in
 * cfg->token_info_hash, allocated from the cfg mempool.
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: pick the spill variables, preferring
 * the in_stack already chosen by a successor so both sides agree. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh spill slots. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * a stack-depth mismatch at a join point makes the method unverifiable. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
/* Store each pending stack value into its spill variable, and replace
 * the stack entry with the variable so later loads read the temp. */
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * Loads into intf_reg the interface offset entry for @klass, given a vtable
 * pointer in vtable_reg.  The interface_offsets array sits at negative offsets
 * immediately before the vtable, indexed by interface_id.
 * AOT path: the (adjusted) IID is not known at compile time, so it is emitted
 * as an AOT constant and added to the vtable pointer before the load.
 * JIT path: the offset -((interface_id + 1) * SIZEOF_VOID_P) is a compile-time
 * immediate.
 * NOTE(review): this listing is an excerpt; the surrounding braces/else of the
 * two paths are not shown here.
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emits IR that leaves a non-zero value in intf_bit_reg iff the bit for
 * @klass's interface_id is set in the interface bitmap found at
 * base_reg + offset (base_reg points at either a MonoClass or a MonoVTable,
 * depending on the caller).
 *
 * Three code paths are visible below:
 *  - COMPRESSED_INTERFACE_BITMAP: load the bitmap pointer and call the
 *    mono_class_interface_match icall with the IID (AOT constant or immediate).
 *  - AOT, uncompressed: the IID is only known at load time, so the byte index
 *    (iid >> 3) and the bit mask (1 << (iid & 7)) are computed in registers.
 *  - JIT, uncompressed: both the byte offset and the mask are immediates.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
/* the icall result is the match flag; move it into the caller's register */
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: interface_id is known, so index and mask are immediates */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: points the bitmap check at MonoClass::interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: points the bitmap check at MonoVTable::interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is greater than
1439 * the value given by max_iid_reg; if so, either branch to false_target (when
 * non-NULL) or throw an InvalidCastException.  The comparison is unsigned
 * (PBLT_UN / LT_UN on "max_iid < iid").
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 			MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: the IID is only known at load time, so compare against a register */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable::max_interface_id (16-bit unsigned) then delegates. */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 						 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass::max_interface_id (16-bit unsigned) then delegates. */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 						 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "is instance" test for a non-interface class: checks whether the
 * class in klass_reg has @klass among its supertypes, using the per-class
 * supertypes table indexed by idepth.  Branches to true_target on a match;
 * the idepth guard branches to false_target when the hierarchy is too shallow.
 * klass_inst, when non-NULL, supplies the target class as a runtime value
 * (generic sharing); otherwise the class is an AOT constant or an immediate.
 * NOTE(review): excerpt — the lines selecting between the idepth-guarded and
 * unguarded supertype load are not all visible here.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only emit the depth check when the default supertable cannot cover klass */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with the target class known at JIT time
 * (no runtime class instruction). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast check given a vtable in vtable_reg: first verifies
 * interface_id <= max_interface_id, then tests the interface bitmap bit.
 * On success branches to true_target; on failure branches to false_target or
 * throws InvalidCastException (presumably depending on whether the targets are
 * NULL — the selecting lines are not visible in this excerpt).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same max-IID + bitmap-bit test as mini_emit_iface_cast, but reading the
 * fields from a MonoClass instead of a MonoVTable. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class-equality check: compares klass_reg against the target
 * class and throws InvalidCastException if they differ (NE_UN).
 * The class may come from a runtime instruction (klass_inst, generic sharing),
 * an AOT class constant, or a JIT-time immediate — the branch selecting the
 * klass_inst path is not visible in this excerpt.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Wrapper: exact class check with no runtime class instruction. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares klass_reg against @klass (AOT class constant or JIT immediate) and
 * emits a conditional branch to @target using @branch_op (e.g. OP_PBEQ /
 * OP_PBNE_UN) instead of throwing.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses into
 * mini_emit_castclass for arrays of arrays. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a castclass check for the class in klass_reg (object in obj_reg,
 * which may be -1 to skip object-level checks).  Throws InvalidCastException
 * on failure; object_is_null is the block to branch to when a check can be
 * satisfied trivially.  Two major paths are visible:
 *  - array casts: verify rank, then check the element class, with special
 *    cases when the target element class is object, System.Enum or its parent
 *    (so enums and their underlying types interconvert per CLI array rules);
 *  - ordinary classes: walk the supertypes table (guarded by idepth) and
 *    require an exact match at the target's depth.
 * NOTE(review): excerpt — the "if (klass->rank)" style branch heads are not
 * all visible here.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* generic-sharing class instructions are not supported on the array path */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
/* Non-array path: supertype-table walk, same shape as isninst but throwing */
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Wrapper: castclass check with no runtime class instruction. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline IR to set @size bytes at destreg+offset to @val.
 * Only val == 0 is supported (asserted below).  Small aligned sizes become a
 * single store-immediate; larger regions loop, storing register-sized chunks
 * first (8-byte stores only on 64-bit and when unaligned access is allowed),
 * then 4-, 2- and 1-byte tails.
 * NOTE(review): excerpt — the switch heads, break statements and the
 * size/offset bookkeeping between store groups are not visible here.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register once, then store repeatedly */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline IR to copy @size bytes from srcreg+soffset to destreg+doffset.
 * Mirrors mini_emit_memset's structure: register-sized chunks first (8-byte
 * loads/stores only on 64-bit with unaligned access allowed), then 4-, 2- and
 * 1-byte tails.  Sizes are capped at 10000 to bound code expansion.
 * NOTE(review): excerpt — the loop conditions and the size/offset updates
 * between copy groups are not visible here.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a method's return type to the JIT call opcode family:
 * VOIDCALL (void), CALL (int/object/pointer), LCALL (64-bit int),
 * FCALL (float), VCALL (valuetype/typedbyref) — each with _REG (calli) and
 * VIRT variants selected by @calli / @virt.  Enums recurse through their
 * underlying type; generic instances recurse through the container class.
 * NOTE(review): excerpt — several case labels and the "goto handle_enum"
 * style plumbing are not visible here.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are machine pointers → plain CALL family */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
1817 if (type->data.klass->enumtype) {
/* enums resolve to their underlying primitive and are re-dispatched */
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): excerpt — the "return 1;" lines following each failed
 * comparison are not visible in this listing. */
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-resolved) type of the target */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
/* for valuetypes the classes must match exactly, not just the stack kind */
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): excerpt — the "return 1;"/"continue;"/break lines after each
 * failed check are not visible in this listing. */
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
/* enum parameters are re-dispatched through their underlying type */
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Maps an OP_*CALLVIRT opcode to its direct-call equivalent, used when a
 * virtual call can be statically dispatched.
 * NOTE(review): excerpt — most case labels/returns of the switch are not
 * visible in this listing.
 */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/*
 * Maps an OP_*CALLVIRT opcode to its *CALL_MEMBASE equivalent, i.e. a call
 * through a [basereg + offset] slot (vtable/IMT dispatch).
 * NOTE(review): excerpt — the switch head and some case labels are not
 * visible in this listing.
 */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Emits the hidden IMT argument for an interface call: either the provided
 * imt_arg (moved into a fresh register), or the called method itself as an
 * AOT METHODCONST / JIT-time PCONST.  LLVM and non-LLVM paths build the same
 * value; delivery differs — via MONO_ARCH_IMT_REG when the architecture
 * defines one, otherwise via mono_arch_emit_imt_argument (non-LLVM) or a
 * dummy outarg to keep the value alive (LLVM without an IMT reg).
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same value construction as above */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2138 static MonoJumpInfo *
/*
 * Allocates a MonoJumpInfo patch record from mempool @mp and fills in its
 * target; callers use it e.g. for OP_ABS patches (see mono_emit_abs_call).
 * The record is mempool-owned — callers must not free it.
 */
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
2150 inline static MonoCallInst *
/*
 * Creates and prepares a MonoCallInst for a call with signature @sig and
 * arguments @args, but does not add it to the current bblock (callers do
 * that after setting the call target).
 * @calli: indirect call (address in a register) — selects *_REG opcodes.
 * @virtual: virtual call — selects *VIRT opcodes.
 * @tail: emit OP_TAILCALL instead of a normal call opcode.
 * Valuetype returns either reuse cfg->vret_addr or get a temp whose address
 * is exposed through OP_OUTARG_VTRETADDR (see the long comment below).
 * On soft-float targets, R4 arguments are pre-converted via an icall since
 * the conversion cannot happen inside the call sequence.
 */
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, 
2152 					 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using 
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Delegate arg marshalling to the LLVM or native backend */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* track the largest outgoing-args area any call in this method needs */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
2247 inline static MonoInst*
/*
 * Emits an indirect call through the address in @addr: builds the call with
 * the *_REG opcode family, wires the address register into sreg1, and adds
 * the instruction to the current bblock.  Returns the call instruction.
 */
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/*
 * Attaches the runtime-generic-context argument to @call: passed in the
 * dedicated MONO_ARCH_RGCTX_REG when the architecture defines one (and marks
 * cfg/call accordingly), otherwise recorded in call->rgctx_arg_reg.
 */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
2274 inline static MonoInst*
/*
 * Like mono_emit_calli, but additionally passes an rgctx argument: the rgctx
 * value is copied into a fresh register before the call is built, then bound
 * to the call via set_rgctx_arg.
 */
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations: defined later in this file, used by
 * mono_emit_method_call_full below for generic-sharing lookups. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emits a call to @method.  @this non-NULL means a virtual-style call.
 * Dispatch strategy, in order:
 *  - possibly-remote methods under generic sharing: resolve the remoting
 *    wrapper address through the rgctx and emit an indirect call;
 *  - multicastdelegate Invoke: call through delegate->invoke_impl;
 *  - non-virtual or final methods: devirtualize to a direct call (with a
 *    remoting-check wrapper for MarshalByRef/object classes);
 *  - otherwise: true virtual dispatch through the vtable, using the IMT
 *    slot for interface methods (MONO_ARCH_HAVE_IMT) or the classic
 *    interface-offsets table / vtable index.
 * Returns the emitted call instruction.
 * NOTE(review): excerpt — several branch heads and #else/#endif lines are
 * missing from this listing; comments describe only what is visible.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* An instance call on a MarshalByRef (or object) class may cross a remoting
 * boundary unless it is virtual or the receiver is known to be local */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
/* Devirtualization: non-virtual methods, or final methods (except the
 * remoting wrapper), can be called directly after a null check */
2357 if ((!cfg->compile_aot || enable_for_aot) && 
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with implicit null check) */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes a vtable/rgctx
 * argument: the value is copied to a fresh register before the call is built,
 * then attached via set_rgctx_arg.
 */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 								  MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emits a direct call to the native address @func with signature @sig.
 * The function pointer is stored on the call (line not visible in this
 * excerpt) and the instruction is added to the current bblock.
 */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/*
 * Emits a call to the JIT icall registered for address @func: looks up its
 * MonoJitICallInfo and calls through the icall wrapper with info->sig.
 */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
/*
 * Builds a MonoJumpInfo patch describing the target, registers it in
 * cfg->abs_patches (created lazily), and passes the patch record itself as
 * the call address; the ABS-patch resolver fixes up the real address later.
 */
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data, 
2498 					MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen a sub-word call result to full register width, since native code
 * (pinvoke) and LLVM-compiled code may leave the upper bits uninitialized.
 * NOTE(review): sampled view — the `break;`s of the switch, the default
 * case and the final return are not visible here.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Pick the sign/zero-extension op matching the return type's load width. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the stack type of the original result. */
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed corlib memcpy helper (cached after first lookup).
 * NOTE(review): the helper is looked up on String — presumably where corlib
 * declares its internal memcpy; aborts if an old corlib lacks it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap with one bit per pointer-sized slot of KLASS (starting
 * at byte OFFSET), setting the bits of slots that hold object references.
 * Recurses into embedded value types that contain references.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) object header; strip it. */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned for the per-slot bitmap to work. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, if VALUE is NULL, the
 * register VALUE_REG) through PTR.  Prefers an inline card-table mark when
 * the architecture supports it; otherwise falls back to either an inlined
 * card-table sequence or a call to the GC's managed write-barrier method.
 * NOTE(review): sampled view — several branch/brace lines are not visible.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2597 int card_table_shift_bits;
2598 gpointer card_table_mask;
2599 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2600 MonoInst *dummy_use;
2602 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2603 int nursery_shift_bits;
2604 size_t nursery_size;
2606 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* JIT-only fast path: a single arch-specific card-table barrier op. */
2608 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2611 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2612 wbarrier->sreg1 = ptr->dreg;
2614 wbarrier->sreg2 = value->dreg;
2616 wbarrier->sreg2 = value_reg;
2617 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inlined card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
2621 int offset_reg = alloc_preg (cfg);
2622 int card_reg = alloc_preg (cfg);
2625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2626 if (card_table_mask)
2627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2629 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2630 * IMM's larger than 32bits.
2632 if (cfg->compile_aot) {
2633 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2635 MONO_INST_NEW (cfg, ins, OP_PCONST);
2636 ins->inst_p0 = card_table;
2637 ins->dreg = card_reg;
2638 MONO_ADD_INS (cfg->cbb, ins);
2641 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier with PTR. */
2644 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2645 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier via a dummy use. */
2649 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2651 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2652 dummy_use->sreg1 = value_reg;
2653 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS.
 * iargs[0]/iargs[1] hold the destination/source addresses.  Small copies
 * are unrolled word-by-word with per-slot barriers; larger copies fall
 * back to the mono_gc_wbarrier_value_copy_bitmap icall using a reference
 * bitmap.  Returns early (visible lines imply failure paths) when the
 * layout is unsuitable.  NOTE(review): sampled view — the failure returns,
 * several braces and the trailing size>=4/2/1 conditionals are not visible.
 */
2659 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2661 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2662 unsigned need_wb = 0;
2667 /*types with references can't have alignment smaller than sizeof(void*) */
2668 if (align < SIZEOF_VOID_P)
2671 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2672 if (size > 32 * SIZEOF_VOID_P)
/* Compute which pointer-sized slots hold references. */
2675 create_write_barrier_bitmap (klass, &need_wb, 0);
2677 /* We don't unroll more than 5 stores to avoid code bloat. */
2678 if (size > 5 * SIZEOF_VOID_P) {
2679 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size. */
2680 size += (SIZEOF_VOID_P - 1);
2681 size &= ~(SIZEOF_VOID_P - 1);
2683 EMIT_NEW_ICONST (cfg, iargs [2], size);
2684 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2685 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Unrolled path: copy word by word, advancing a running dest pointer. */
2689 destreg = iargs [0]->dreg;
2690 srcreg = iargs [1]->dreg;
2693 dest_ptr_reg = alloc_preg (cfg);
2694 tmp_reg = alloc_preg (cfg);
2697 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2699 while (size >= SIZEOF_VOID_P) {
2700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier for this slot (emitted only for reference slots per need_wb,
 * per the not-visible condition — TODO confirm against full source). */
2704 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2706 offset += SIZEOF_VOID_P;
2707 size -= SIZEOF_VOID_P;
2710 /*tmp += sizeof (void*)*/
2711 if (size >= SIZEOF_VOID_P) {
2712 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2713 MONO_ADD_INS (cfg->cbb, iargs [0]);
2717 /* Those cannot be references since size < sizeof (void*) */
2719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2727 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2743 * Emit code to copy a valuetype of type @klass whose address is stored in
2744 * @src->dreg to memory whose address is stored at @dest->dreg.
2747 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2749 MonoInst *iargs [4];
2752 MonoMethod *memcpy_method;
2756 * This check breaks with spilled vars... need to handle it during verification anyway.
2757 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Use native (marshalled) size for pinvoke copies, managed size otherwise. */
2761 n = mono_class_native_size (klass, &align);
2763 n = mono_class_value_size (klass, &align);
2765 /* if native is true there should be no references in the struct */
2766 if (cfg->gen_write_barriers && klass->has_references && !native) {
2767 /* Avoid barriers when storing to the stack */
2768 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2769 (dest->opcode == OP_LDADDR))) {
2770 int context_used = 0;
2775 if (cfg->generic_sharing_context)
2776 context_used = mono_class_check_context_used (klass);
2778 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2779 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2781 } else if (context_used) {
/* Shared code: load the class from the rgctx at run time. */
2782 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2784 if (cfg->compile_aot) {
2785 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2787 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before mono_value_copy uses it. */
2788 mono_class_compute_gc_descriptor (klass);
2792 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: small copies are inlined, larger ones call memcpy. */
2797 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2798 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2799 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2803 EMIT_NEW_ICONST (cfg, iargs [2], n);
2805 memcpy_method = get_memcpy_method ();
2806 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed corlib memset helper (cached after first lookup);
 * aborts if an old corlib lacks it.
 */
2811 get_memset_method (void)
2813 static MonoMethod *memset_method = NULL;
2814 if (!memset_method) {
2815 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2817 g_error ("Old corlib found. Install a new one");
2819 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at DEST->dreg.
 * Small types are cleared inline; larger ones call the corlib memset helper.
 */
2823 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2825 MonoInst *iargs [3];
2828 MonoMethod *memset_method;
2830 /* FIXME: Optimize this for the case when dest is an LDADDR */
2832 mono_class_init (klass);
2833 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: inline the clear. */
2835 if (n <= sizeof (gpointer) * 5) {
2836 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2839 memset_method = get_memset_method ();
2841 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2842 EMIT_NEW_ICONST (cfg, iargs [2], n);
2843 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD, depending
 * on how generic sharing passes it: via the method rgctx (MRGCTX), via the
 * vtable argument, or via `this`'s vtable.  NOTE(review): sampled view —
 * the returns of the individual branches are not visible here.
 */
2848 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2850 MonoInst *this = NULL;
2852 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the context through `this`. */
2854 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2855 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2856 !method->klass->valuetype)
2857 EMIT_NEW_ARGLOAD (cfg, this, 0);
2859 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Method context used: load the MRGCTX from its stack slot. */
2860 MonoInst *mrgctx_loc, *mrgctx_var;
2863 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2865 mrgctx_loc = mono_get_vtable_var (cfg);
2866 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2869 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* Static or valuetype methods: the vtable (or MRGCTX) is passed explicitly. */
2870 MonoInst *vtable_loc, *vtable_var;
2874 vtable_loc = mono_get_vtable_var (cfg);
2875 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2877 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* What was loaded is actually an MRGCTX; fetch its class vtable. */
2878 MonoInst *mrgctx_var = vtable_var;
2881 vtable_reg = alloc_preg (cfg);
2882 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2883 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable out of `this`. */
2889 int vtable_reg, res_reg;
2891 vtable_reg = alloc_preg (cfg);
2892 res_reg = alloc_preg (cfg);
2893 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-fetch patch entry describing a
 * lazy rgctx slot lookup for METHOD: the wrapped patch (PATCH_TYPE /
 * PATCH_DATA), whether the lookup goes through the MRGCTX, and INFO_TYPE.
 */
2898 static MonoJumpInfoRgctxEntry *
2899 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2901 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2902 res->method = method;
2903 res->in_mrgctx = in_mrgctx;
2904 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2905 res->data->type = patch_type;
2906 res->data->data.target = patch_data;
2907 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the
 * argument and ENTRY as the patch data describing which slot to fetch.
 */
2912 static inline MonoInst*
2913 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2915 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for KLASS from the runtime
 * generic context of the current method.
 */
2919 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2920 MonoClass *klass, int rgctx_type)
2922 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2923 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2925 return emit_rgctx_fetch (cfg, rgctx, entry);
2929 * emit_get_rgctx_method:
2931 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2932 * normal constants, else emit a load from the rgctx.
2935 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2936 MonoMethod *cmethod, int rgctx_type)
2938 if (!context_used) {
/* Non-shared code: the method is known at compile time, emit a constant. */
2941 switch (rgctx_type) {
2942 case MONO_RGCTX_INFO_METHOD:
2943 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2945 case MONO_RGCTX_INFO_METHOD_RGCTX:
2946 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2949 g_assert_not_reached ();
/* Shared code: go through the rgctx lazy fetch. */
2952 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2953 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2955 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for FIELD from the runtime
 * generic context of the current method.
 */
2960 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2961 MonoClassField *field, int rgctx_type)
2963 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2964 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2966 return emit_rgctx_fetch (cfg, rgctx, entry);
2970 * On return the caller must check @klass for load errors.
/* emit_generic_class_init: emit a call to the generic-class-init trampoline
 * for KLASS's vtable, resolving the vtable either through the rgctx (shared
 * code) or as a compile-time constant. */
2973 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2975 MonoInst *vtable_arg;
2977 int context_used = 0;
2979 if (cfg->generic_sharing_context)
2980 context_used = mono_class_check_context_used (klass);
2983 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2984 klass, MONO_RGCTX_INFO_VTABLE);
2986 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2990 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature. */
2993 if (COMPILE_LLVM (cfg))
2994 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2996 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2997 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated architecture register. */
2998 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2999 cfg->uses_vtable_reg = TRUE;
3006 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type: emit a check that OBJ's runtime type is
 * exactly ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise
 * (used by stelem-style stores). */
3009 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3011 int vtable_reg = alloc_preg (cfg);
3012 int context_used = 0;
3014 if (cfg->generic_sharing_context)
3015 context_used = mono_class_check_context_used (array_class);
/* Load the vtable; this also acts as the null check (faults on null). */
3017 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3019 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: compare the MonoClass loaded from the vtable. */
3020 int class_reg = alloc_preg (cfg);
3021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3022 if (cfg->compile_aot) {
3023 int klass_reg = alloc_preg (cfg);
3024 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3025 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3029 } else if (context_used) {
/* gshared: fetch the expected vtable from the rgctx and compare. */
3030 MonoInst *vtable_ins;
3032 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3033 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3035 if (cfg->compile_aot) {
3039 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3041 vt_reg = alloc_preg (cfg);
3042 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3043 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3046 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch above throws. */
3052 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (from OBJ_REG's
 * vtable) and the target KLASS into the JIT TLS area so a failing cast can
 * report both types.  No-op (beyond a warning) otherwise.
 */
3056 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3058 if (mini_get_debug_options ()->better_cast_details) {
3059 int to_klass_reg = alloc_preg (cfg);
3060 int vtable_reg = alloc_preg (cfg);
3061 int klass_reg = alloc_preg (cfg);
3062 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic may be unavailable on some platforms. */
3065 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3069 MONO_ADD_INS (cfg->cbb, tls_get);
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3074 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the TLS cast-details record written by save_cast_details() once
 * the cast has succeeded.
 */
3080 reset_cast_details (MonoCompile *cfg)
3082 /* Reset the variables holding the cast details */
3083 if (mini_get_debug_options ()->better_cast_details) {
3084 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3086 MONO_ADD_INS (cfg->cbb, tls_get);
3087 /* It is enough to reset the from field */
3088 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3093 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3094 * generic code is generated.
3097 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling the managed Nullable Unbox helper. */
3099 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3102 MonoInst *rgctx, *addr;
3104 /* FIXME: What if the class is shared? We might not
3105 have to get the address of the method from the
/* Shared code: resolve the helper's code address through the rgctx
 * and perform an indirect (calli) call with the rgctx argument. */
3107 addr = emit_get_rgctx_method (cfg, context_used, method,
3108 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3110 rgctx = emit_get_rgctx (cfg, method, context_used);
3112 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3114 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of sp[0] to valuetype KLASS: type-check the boxed
 * object (rank 0, matching element class) and return the address of its
 * payload (just past the MonoObject header).  NOTE(review): sampled view —
 * some branch/brace lines and the final return are not visible.
 */
3119 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3123 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3124 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3125 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3126 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3128 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3129 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3130 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3132 /* FIXME: generics */
3133 g_assert (klass->rank == 0);
/* A boxed valuetype is never an array: rank must be 0. */
3136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3137 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
3143 MonoInst *element_class;
3145 /* This assertion is from the unboxcast insn */
3146 g_assert (klass->rank == 0);
3148 element_class = emit_get_rgctx_klass (cfg, context_used,
3149 klass->element_class, MONO_RGCTX_INFO_KLASS);
3151 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3152 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3154 save_cast_details (cfg, klass->element_class, obj_reg);
3155 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3156 reset_cast_details (cfg);
/* Result: address of the unboxed payload = obj + sizeof (MonoObject). */
3159 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3160 MONO_ADD_INS (cfg->cbb, add);
3161 add->type = STACK_MP;
3168 * Returns NULL and set the cfg exception on error.
/* handle_alloc: emit object allocation for KLASS, choosing between the
 * GC's managed allocator, domain-aware mono_object_new, the mscorlib
 * helper (AOT out-of-line paths), or a specialized allocation function.
 * FOR_BOX selects box-allocation semantics.  NOTE(review): sampled view —
 * several branches, braces and intermediate lines are not visible. */
3171 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3173 MonoInst *iargs [2];
/* Shared-generics path (context_used != 0 — condition line not visible). */
3179 MonoInst *iargs [2];
3182 FIXME: we cannot get managed_alloc here because we can't get
3183 the class's vtable (because it's not a closed class)
3185 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3186 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3189 if (cfg->opt & MONO_OPT_SHARED)
3190 rgctx_info = MONO_RGCTX_INFO_KLASS;
3192 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3193 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3195 if (cfg->opt & MONO_OPT_SHARED) {
3196 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3198 alloc_ftn = mono_object_new;
3201 alloc_ftn = mono_object_new_specific;
3204 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths. */
3207 if (cfg->opt & MONO_OPT_SHARED) {
3208 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3209 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3211 alloc_ftn = mono_object_new;
3212 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3213 /* This happens often in argument checking code, eg. throw new FooException... */
3214 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3215 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3216 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3218 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3219 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TypeLoad error on the cfg. */
3223 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3224 cfg->exception_ptr = klass;
3228 #ifndef MONO_CROSS_COMPILE
3229 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3232 if (managed_alloc) {
3233 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3234 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3236 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3238 guint32 lw = vtable->klass->instance_size;
3239 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3240 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3241 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3244 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3248 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3252 * Returns NULL and set the cfg exception on error.
/* handle_box: emit the boxing of VAL to KLASS.  Nullable<T> goes through
 * the managed Box helper; other valuetypes allocate and store the payload
 * just past the object header. */
3255 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3257 MonoInst *alloc, *ins;
3259 if (mono_class_is_nullable (klass)) {
3260 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3263 /* FIXME: What if the class is shared? We might not
3264 have to get the method address from the RGCTX. */
/* Shared code: indirect call through an rgctx-resolved code address. */
3265 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3266 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3267 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3269 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3271 return mono_emit_method_call (cfg, method, &val, NULL);
/* Plain valuetype: allocate, then store VAL into the payload area. */
3275 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3279 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3284 // FIXME: This doesn't work yet (class libs tests fail?)
/* is_complex_isinst: TRUE when the isinst/castclass for KLASS needs the
 * icall slow path (interfaces, arrays, Nullable, MBR, sealed, variance,
 * type variables).  NOTE: the leading `TRUE ||` currently forces the slow
 * path for every class — see the FIXME above. */
3285 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3288 * Returns NULL and set the cfg exception on error.
/* handle_castclass: emit CIL `castclass` of SRC to KLASS.  Complex cases
 * (see is_complex_isinst) go through the mono_object_castclass icall;
 * simple cases are inlined: null passes through, otherwise the vtable /
 * class chain is checked and InvalidCastException thrown on mismatch.
 * NOTE(review): sampled view — some braces and the final return are not
 * visible. */
3291 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3293 MonoBasicBlock *is_null_bb;
3294 int obj_reg = src->dreg;
3295 int vtable_reg = alloc_preg (cfg);
3296 MonoInst *klass_inst = NULL;
3301 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3302 klass, MONO_RGCTX_INFO_KLASS);
3304 if (is_complex_isinst (klass)) {
3305 /* Complex case, handle by an icall */
3311 args [1] = klass_inst;
3313 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3315 /* Simple case, handled by the code below */
/* null references always pass castclass. */
3319 NEW_BBLOCK (cfg, is_null_bb);
3321 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3324 save_cast_details (cfg, klass, obj_reg);
3326 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3328 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3330 int klass_reg = alloc_preg (cfg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes could be checked by a single compare, but the
 * fast path is disabled — see the FIXME below. */
3334 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3335 /* the remoting code is broken, access the class for now */
3336 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3337 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3339 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3340 cfg->exception_ptr = klass;
3343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3348 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3351 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3355 MONO_START_BB (cfg, is_null_bb);
3357 reset_cast_details (cfg);
3363 * Returns NULL and set the cfg exception on error.
/* handle_isinst: emit CIL `isinst` of SRC against KLASS.  Complex cases go
 * through the mono_object_isinst icall; simple cases are inlined with a
 * basic-block diamond: res = obj when the type matches (or obj is null),
 * res = NULL otherwise.  NOTE(review): sampled view — some braces and the
 * final return are not visible. */
3366 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3369 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3370 int obj_reg = src->dreg;
3371 int vtable_reg = alloc_preg (cfg);
3372 int res_reg = alloc_preg (cfg);
3373 MonoInst *klass_inst = NULL;
3376 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3378 if (is_complex_isinst (klass)) {
3381 /* Complex case, handle by an icall */
3387 args [1] = klass_inst;
3389 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3391 /* Simple case, the code below can handle it */
3395 NEW_BBLOCK (cfg, is_null_bb);
3396 NEW_BBLOCK (cfg, false_bb);
3397 NEW_BBLOCK (cfg, end_bb);
3399 /* Do the assignment at the beginning, so the other assignment can be if converted */
3400 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3401 ins->type = STACK_OBJ;
/* null input: result is the (null) object itself. */
3404 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3405 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3410 g_assert (!context_used);
3411 /* the is_null_bb target simply copies the input register to the output */
3412 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3414 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3417 int rank_reg = alloc_preg (cfg);
3418 int eclass_reg = alloc_preg (cfg);
3420 g_assert (!context_used);
3421 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3424 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types whose assignability rules involve enums. */
3426 if (klass->cast_class == mono_defaults.object_class) {
3427 int parent_reg = alloc_preg (cfg);
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3429 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3430 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3432 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3433 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3434 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3435 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3436 } else if (klass->cast_class == mono_defaults.enum_class) {
3437 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3439 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3440 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3442 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3443 /* Check that the object is a vector too */
3444 int bounds_reg = alloc_preg (cfg);
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3450 /* the is_null_bb target simply copies the input register to the output */
3451 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3453 } else if (mono_class_is_nullable (klass)) {
3454 g_assert (!context_used);
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3456 /* the is_null_bb target simply copies the input register to the output */
3457 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3459 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3460 g_assert (!context_used);
3461 /* the remoting code is broken, access the class for now */
3462 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3463 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3465 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3466 cfg->exception_ptr = klass;
3469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3477 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3478 /* the is_null_bb target simply copies the input register to the output */
3479 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the test failed — result is NULL. */
3484 MONO_START_BB (cfg, false_bb);
3486 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3487 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3489 MONO_START_BB (cfg, is_null_bb);
3491 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the OP_CISINST-style check of SRC against KLASS (used by remoting
 * aware casts).  Result values are documented in the comment below.
 * NOTE(review): sampled view — some braces and the final return are not
 * visible.
 */
3497 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3499 /* This opcode takes as input an object reference and a class, and returns:
3500 0) if the object is an instance of the class,
3501 1) if the object is not instance of the class,
3502 2) if the object is a proxy whose type cannot be determined */
3505 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3506 int obj_reg = src->dreg;
3507 int dreg = alloc_ireg (cfg);
3509 int klass_reg = alloc_preg (cfg);
3511 NEW_BBLOCK (cfg, true_bb);
3512 NEW_BBLOCK (cfg, false_bb);
3513 NEW_BBLOCK (cfg, false2_bb);
3514 NEW_BBLOCK (cfg, end_bb);
3515 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3520 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface: try the interface check first; on failure, see whether the
 * object is a transparent proxy whose type can't be determined. */
3521 NEW_BBLOCK (cfg, interface_fail_bb);
3523 tmp_reg = alloc_preg (cfg);
3524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3525 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3526 MONO_START_BB (cfg, interface_fail_bb);
3527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3529 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info -> result 2 ("cannot determine"). */
3531 tmp_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3534 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: distinguish proxies from regular objects. */
3536 tmp_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3540 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class. */
3541 tmp_reg = alloc_preg (cfg);
3542 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3545 tmp_reg = alloc_preg (cfg);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3550 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3553 MONO_START_BB (cfg, no_proxy_bb);
3555 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join blocks: materialize the 0/1/2 result into dreg. */
3558 MONO_START_BB (cfg, false_bb);
3560 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3563 MONO_START_BB (cfg, false2_bb);
3565 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3568 MONO_START_BB (cfg, true_bb);
3570 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3572 MONO_START_BB (cfg, end_bb);
/* The result is surfaced through an ICONST-typed instruction (I4). */
3575 MONO_INST_NEW (cfg, ins, OP_ICONST);
3577 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode: a castclass that is aware of
 *   transparent (remoting) proxies.  Result codes are listed in the original
 *   comment below; a failed cast raises InvalidCastException.
 *   NOTE(review): elided view -- return-type/brace lines are not visible.
 */
3583 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3585 /* This opcode takes as input an object reference and a class, and returns:
3586 0) if the object is an instance of the class,
3587 1) if the object is a proxy whose type cannot be determined
3588 an InvalidCastException exception is thrown otherwise */
3591 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3592 int obj_reg = src->dreg;
3593 int dreg = alloc_ireg (cfg);
3594 int tmp_reg = alloc_preg (cfg);
3595 int klass_reg = alloc_preg (cfg);
3597 NEW_BBLOCK (cfg, end_bb);
3598 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class so a failing cast can produce a detailed message. */
3603 save_cast_details (cfg, klass, obj_reg);
/* Interface target: fast interface check, then the proxy slow path. */
3605 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3606 NEW_BBLOCK (cfg, interface_fail_bb);
3608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3609 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3610 MONO_START_BB (cfg, interface_fail_bb);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy that failed the iface check: mini_emit_class_check throws. */
3613 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot be cast here: throw. */
3615 tmp_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3618 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("cannot determine"). */
3620 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3621 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target (the `} else {` line is elided in this view). */
3624 NEW_BBLOCK (cfg, no_proxy_bb);
3626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3628 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3630 tmp_reg = alloc_preg (cfg);
3631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3634 tmp_reg = alloc_preg (cfg);
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3637 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3639 NEW_BBLOCK (cfg, fail_1_bb);
3641 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy subtype test failed: result 1, no exception. */
3643 MONO_START_BB (cfg, fail_1_bb);
3645 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3646 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: mini_emit_castclass throws on mismatch. */
3648 MONO_START_BB (cfg, no_proxy_bb);
3650 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
/* Result 0: cast succeeded. */
3653 MONO_START_BB (cfg, ok_result_bb);
3655 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3657 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value. */
3660 MONO_INST_NEW (cfg, ins, OP_ICONST);
3662 ins->type = STACK_I4;
3668 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *   Allocate a delegate object of KLASS and inline the work normally done by
 *   mono_delegate_ctor (): store the target object, the method, an optional
 *   per-domain code slot, and the invoke trampoline.
 *   @context_used: generic-sharing context flags forwarded to the rgctx
 *   fetch helpers.  Returns the new delegate instance.
 */
3670 static G_GNUC_UNUSED MonoInst*
3671 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3675 gpointer *trampoline;
3676 MonoInst *obj, *method_ins, *tramp_ins;
3680 obj = handle_alloc (cfg, klass, FALSE, 0);
3684 /* Inline the contents of mono_delegate_ctor */
3686 /* Set target field */
3687 /* Optimize away setting of NULL target */
3688 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference, so the GC may need a barrier. */
3690 if (cfg->gen_write_barriers) {
3691 dreg = alloc_preg (cfg);
3692 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3693 emit_write_barrier (cfg, ptr, target, 0);
3697 /* Set method field */
3698 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3700 if (cfg->gen_write_barriers) {
3701 dreg = alloc_preg (cfg);
3702 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3703 emit_write_barrier (cfg, ptr, method_ins, 0);
3706 * To avoid looking up the compiled code belonging to the target method
3707 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3708 * store it, and we fill it after the method has been compiled.
/* Skipped under AOT (address unknown at compile time) and for dynamic
 * methods (their code can be freed). */
3710 if (!cfg->compile_aot && !method->dynamic) {
3711 MonoInst *code_slot_ins;
3714 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
3716 domain = mono_domain_get ();
3717 mono_domain_lock (domain);
3718 if (!domain_jit_info (domain)->method_code_hash)
3719 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3720 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3722 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3723 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3725 mono_domain_unlock (domain);
3727 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3732 /* Set invoke_impl field */
3733 if (cfg->compile_aot) {
3734 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3736 trampoline = mono_create_delegate_trampoline (klass);
3737 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3741 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a call to the rank-specific mono_array_new_va icall for a
 *   multi-dimensional NEWARR/newobj; SP holds the dimension arguments.
 *   Returns the call instruction produced by mono_emit_native_call ().
 */
3747 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3749 MonoJitICallInfo *info;
3751 /* Need to register the icall so it gets an icall wrapper */
3752 info = mono_get_array_new_va_icall (rank);
3754 cfg->flags |= MONO_CFG_HAS_VARARGS;
3756 /* mono_array_new_va () needs a vararg calling convention */
3757 cfg->disable_llvm = TRUE;
3759 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3760 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at
 *   the very start of the entry basic block, exactly once per compile
 *   (guarded by cfg->got_var_allocated).  No-op when there is no got_var.
 */
3764 mono_emit_load_got_addr (MonoCompile *cfg)
3766 MonoInst *getaddr, *dummy_use;
3768 if (!cfg->got_var || cfg->got_var_allocated)
3771 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3772 getaddr->dreg = cfg->got_var->dreg;
3774 /* Add it to the start of the first bblock */
3775 if (cfg->bb_entry->code) {
/* Prepend by linking in front of the existing instruction list. */
3776 getaddr->next = cfg->bb_entry->code;
3777 cfg->bb_entry->code = getaddr;
3780 MONO_ADD_INS (cfg->bb_entry, getaddr);
3782 cfg->got_var_allocated = TRUE;
3785 * Add a dummy use to keep the got_var alive, since real uses might
3786 * only be generated by the back ends.
3787 * Add it to end_bblock, so the variable's lifetime covers the whole
3789 * It would be better to make the usage of the got var explicit in all
3790 * cases when the backend needs it (i.e. calls, throw etc.), so this
3791 * wouldn't be needed.
3793 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3794 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized IL-size cap for inlining; set from the MONO_INLINELIMIT
 * environment variable (default INLINE_LENGTH_LIMIT) the first time
 * mono_method_check_inlining () runs. */
3797 static int inline_limit;
3798 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled in
 *   CFG.  Checks generic sharing, inline depth, method attributes, IL size
 *   (against inline_limit), class-initialization constraints, declarative
 *   security, and (under soft-float) R4 parameters/returns.
 *   NOTE(review): elided view -- return statements and braces between the
 *   visible lines are not shown here.
 */
3801 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3803 MonoMethodHeaderSummary header;
3805 #ifdef MONO_ARCH_SOFT_FLOAT
3806 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled under generic sharing and past a fixed depth. */
3810 if (cfg->generic_sharing_context)
3813 if (cfg->inline_depth > 10)
3816 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch reads `signature->ret` while the soft-float
 * block above declares `sig` -- the declaration of `signature` is either
 * elided from this view or inconsistent; verify against the full file. */
3817 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3818 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3819 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3824 if (!mono_method_get_header_summary (method, &header))
3827 /*runtime, icall and pinvoke are checked by summary call*/
3828 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3829 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3830 (method->klass->marshalbyref) ||
3834 /* also consider num_locals? */
3835 /* Do the size check early to avoid creating vtables */
3836 if (!inline_limit_inited) {
3837 if (getenv ("MONO_INLINELIMIT"))
3838 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3840 inline_limit = INLINE_LENGTH_LIMIT;
3841 inline_limit_inited = TRUE;
3843 if (header.code_size >= inline_limit)
3847 * if we can initialize the class of the method right away, we do,
3848 * otherwise we don't allow inlining if the class needs initialization,
3849 * since it would mean inserting a call to mono_runtime_class_init()
3850 * inside the inlined code
3852 if (!(cfg->opt & MONO_OPT_SHARED)) {
3853 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3854 if (cfg->run_cctors && method->klass->has_cctor) {
3855 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3856 if (!method->klass->runtime_info)
3857 /* No vtable created yet */
3859 vtable = mono_class_vtable (cfg->domain, method->klass);
3862 /* This makes so that inline cannot trigger */
3863 /* .cctors: too many apps depend on them */
3864 /* running with a specific order... */
3865 if (! vtable->initialized)
3867 mono_runtime_class_init (vtable);
3869 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3870 if (!method->klass->runtime_info)
3871 /* No vtable created yet */
3873 vtable = mono_class_vtable (cfg->domain, method->klass);
3876 if (!vtable->initialized)
3881 * If we're compiling for shared code
3882 * the cctor will need to be run at aot method load time, for example,
3883 * or at the end of the compilation of the inlining method.
3885 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3890 * CAS - do not inline methods with declarative security
3891 * Note: this has to be before any possible return TRUE;
3893 if (mono_method_has_declsec (method))
/* Soft-float targets cannot inline methods taking or returning R4. */
3896 #ifdef MONO_ARCH_SOFT_FLOAT
3898 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3900 for (i = 0; i < sig->param_count; ++i)
3901 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access compiled in CFG must trigger the
 *   class constructor of VTABLE's class at runtime.  Already-initialized
 *   classes (non-AOT), beforefieldinit classes, and classes whose cctor is
 *   guaranteed to have run before METHOD executes need no check.
 *   NOTE(review): elided view -- the individual return statements between
 *   the visible conditions are not shown here.
 */
3909 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3911 if (vtable->initialized && !cfg->compile_aot)
3914 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3917 if (!mono_class_needs_cctor_run (vtable->klass, method))
3920 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3921 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of the one-dimensional
 *   array ARR with element type KLASS.  When BCHECK is set a bounds check
 *   against MonoArray.max_length is emitted first.  On x86/amd64 a single
 *   LEA is used for power-of-two element sizes; otherwise mul+add.
 *   Returns the address-producing instruction (STACK_PTR).
 */
3928 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3932 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3934 mono_class_init (klass);
3935 size = mono_class_array_element_size (klass);
3937 mult_reg = alloc_preg (cfg);
3938 array_reg = arr->dreg;
3939 index_reg = index->dreg;
3941 #if SIZEOF_REGISTER == 8
3942 /* The array reg is 64 bits but the index reg is only 32 */
3943 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the extension itself. */
3945 index2_reg = index_reg;
3947 index2_reg = alloc_preg (cfg);
3948 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets (the #else line is elided in this view): narrow an I8
 * index down to I4. */
3951 if (index->type == STACK_I8) {
3952 index2_reg = alloc_preg (cfg);
3953 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3955 index2_reg = index_reg;
3960 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3962 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3963 if (size == 1 || size == 2 || size == 4 || size == 8) {
3964 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3966 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3967 ins->type = STACK_PTR;
/* Generic path: addr = array + index * size + offsetof(vector). */
3973 add_reg = alloc_preg (cfg);
3975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3976 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3977 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3978 ins->type = STACK_PTR;
3979 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled on targets with a native multiply (depends on OP_LMUL). */
3984 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 *   element type KLASS, including per-dimension lower-bound adjustment and
 *   range checks against the MonoArrayBounds pair stored in the array.
 *   Returns the address-producing instruction (STACK_MP).
 */
3986 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3988 int bounds_reg = alloc_preg (cfg);
3989 int add_reg = alloc_preg (cfg);
3990 int mult_reg = alloc_preg (cfg);
3991 int mult2_reg = alloc_preg (cfg);
3992 int low1_reg = alloc_preg (cfg);
3993 int low2_reg = alloc_preg (cfg);
3994 int high1_reg = alloc_preg (cfg);
3995 int high2_reg = alloc_preg (cfg);
3996 int realidx1_reg = alloc_preg (cfg);
3997 int realidx2_reg = alloc_preg (cfg);
3998 int sum_reg = alloc_preg (cfg);
4003 mono_class_init (klass);
4004 size = mono_class_array_element_size (klass);
4006 index1 = index_ins1->dreg;
4007 index2 = index_ins2->dreg;
4009 /* range checking */
4010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4011 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; unsigned compare against
 * length throws IndexOutOfRangeException (also catches negative values). */
4013 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4014 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4015 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4016 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4017 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4018 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4019 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check, one MonoArrayBounds further into the array. */
4021 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4022 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4023 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4024 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4025 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4026 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4027 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector). */
4029 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4030 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4032 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4033 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4035 ins->type = STACK_MP;
4037 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Emit the address computation for an Array Get/Set/Address accessor
 *   CMETHOD.  Rank 1 (and rank 2 with MONO_OPT_INTRINS on capable targets)
 *   are inlined; higher ranks call the marshal-generated Address wrapper.
 *   @is_set: the accessor is a setter, so its last parameter is the value
 *   and does not count toward the rank.
 */
4044 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4048 MonoMethod *addr_method;
4051 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4054 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4056 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4057 /* emit_ldelema_2 depends on OP_LMUL */
4058 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4059 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated Address helper for this rank. */
4063 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4064 addr_method = mono_marshal_get_array_address (rank, element_size);
4065 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request unconditionally. */
4070 static MonoBreakPolicy
4071 always_insert_breakpoint (MonoMethod *method)
4073 return MONO_BREAK_POLICY_ALWAYS;
4076 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4079 * mono_set_break_policy:
4080 * policy_callback: the new callback function
4082 * Allow embedders to decide whether to actually obey breakpoint instructions
4083 * (both break IL instructions and Debugger.Break () method calls), for example
4084 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4085 * untrusted or semi-trusted code.
4087 * @policy_callback will be called every time a break point instruction needs to
4088 * be inserted with the method argument being the method that calls Debugger.Break()
4089 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4090 * if it wants the breakpoint to not be effective in the given method.
4091 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Passing NULL restores the default always-break policy. */
4094 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4096 if (policy_callback)
4097 break_policy_func = policy_callback;
4099 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Consult the installed break policy to decide whether a breakpoint for
 *   METHOD should actually be emitted.  MONO_BREAK_POLICY_ON_DBG defers to
 *   whether the Mono debugger is attached.
 *   NOTE(review): "brekpoint" is a typo for "breakpoint"; the name is kept
 *   because the call site in mini_emit_inst_for_method () uses it.
 */
4103 should_insert_brekpoint (MonoMethod *method) {
4104 switch (break_policy_func (method)) {
4105 case MONO_BREAK_POLICY_ALWAYS:
4107 case MONO_BREAK_POLICY_NEVER:
4109 case MONO_BREAK_POLICY_ON_DBG:
4110 return mono_debug_using_mono_debugger ();
/* Unknown value from an embedder-supplied callback: warn (the default case
 * label is elided in this view). */
4112 g_warning ("Incorrect value returned from break policy callback");
4117 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline Array.GetGenericValueImpl (is_set == 0) / SetGenericValueImpl
 *   (is_set != 0) as a typed load + store through the element address.
 *   args: [0] array, [1] index, [2] value / destination (by ref, per
 *   fsig->params [2]).  Bounds checking is the callers' responsibility.
 */
4119 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4121 MonoInst *addr, *store, *load;
4122 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4124 /* the bounds check is already done by the callers */
4125 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: value -> element.  (The `if (is_set) {` line is elided in this view.) */
4127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4128 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Get: element -> destination. */
4130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4131 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic hook for constructor calls: currently only forwards to the
 *   SIMD intrinsics emitter when MONO_OPT_SIMD is enabled.  Returns the
 *   emitted instruction or NULL when no intrinsic applies (the tail of the
 *   function is elided in this view).
 */
4137 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4139 MonoInst *ins = NULL;
4140 #ifdef MONO_ARCH_SIMD_INTRINSICS
4141 if (cfg->opt & MONO_OPT_SIMD) {
4142 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *   Central intrinsics dispatcher: if CMETHOD is one of a known set of
 *   corlib methods (String, Object, Array, RuntimeHelpers, Thread, Monitor,
 *   Interlocked, Debugger, Environment, Math), emit specialized IR inline
 *   and return the resulting instruction; otherwise fall through to the
 *   architecture-specific hook.  Returns NULL when no intrinsic applies.
 *   NOTE(review): elided view -- #else/#endif, brace, and some return lines
 *   between the visible lines are not shown; only comments were added.
 */
4152 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4154 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4156 static MonoClass *runtime_helpers_class = NULL;
4157 if (! runtime_helpers_class)
4158 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4159 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4161 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds-checked 16-bit load from the chars array. */
4162 if (strcmp (cmethod->name, "get_Chars") == 0) {
4163 int dreg = alloc_ireg (cfg);
4164 int index_reg = alloc_preg (cfg);
4165 int mult_reg = alloc_preg (cfg);
4166 int add_reg = alloc_preg (cfg);
4168 #if SIZEOF_REGISTER == 8
4169 /* The array reg is 64 bits but the index reg is only 32 */
4170 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4172 index_reg = args [1]->dreg;
4174 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4176 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4177 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4178 add_reg = ins->dreg;
4179 /* Avoid a warning */
4181 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4185 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4186 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4187 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4189 type_from_op (ins, NULL, NULL);
/* String.get_Length: emit OP_STRLEN and decompose later. */
4191 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4192 int dreg = alloc_ireg (cfg);
4193 /* Decompose later to allow more optimizations */
4194 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4195 ins->type = STACK_I4;
4196 ins->flags |= MONO_INST_FAULT;
4197 cfg->cbb->has_array_access = TRUE;
4198 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* String.InternalSetChar: unchecked 16-bit store into the chars array. */
4201 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4202 int mult_reg = alloc_preg (cfg);
4203 int add_reg = alloc_preg (cfg);
4205 /* The corlib functions check for oob already. */
4206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4207 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4208 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4209 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4212 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: two loads, obj->vtable->type. */
4214 if (strcmp (cmethod->name, "GetType") == 0) {
4215 int dreg = alloc_preg (cfg);
4216 int vt_reg = alloc_preg (cfg);
4217 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4218 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4219 type_from_op (ins, NULL, NULL);
/* Object.InternalGetHashCode: address-based hash, only valid while the GC
 * does not move objects. */
4222 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4223 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4224 int dreg = alloc_ireg (cfg);
4225 int t1 = alloc_ireg (cfg);
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4228 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4229 ins->type = STACK_I4;
/* Object..ctor is a no-op. */
4233 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4234 MONO_INST_NEW (cfg, ins, OP_NOP);
4235 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4239 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by skipping the
 * first character; the 'S'/'G' initial selects get vs. set. */
4240 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4241 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4243 #ifndef MONO_BIG_ARRAYS
4245 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4248 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4249 int dreg = alloc_ireg (cfg);
4250 int bounds_reg = alloc_ireg (cfg);
4251 MonoBasicBlock *end_bb, *szarray_bb;
4252 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4254 NEW_BBLOCK (cfg, end_bb);
4255 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means the array is a szarray (vector). */
4257 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4258 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4259 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4260 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4261 /* Non-szarray case */
4263 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4264 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4266 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4267 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4268 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4269 MONO_START_BB (cfg, szarray_bb);
/* Vector: GetLength(0) is max_length, GetLowerBound(0) is 0. */
4272 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4273 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4275 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4276 MONO_START_BB (cfg, end_bb);
4278 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4279 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
4285 if (cmethod->name [0] != 'g')
/* Array.get_Rank: one byte from the vtable. */
4288 if (strcmp (cmethod->name, "get_Rank") == 0) {
4289 int dreg = alloc_ireg (cfg);
4290 int vtable_reg = alloc_preg (cfg);
4291 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4292 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4293 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4294 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4295 type_from_op (ins, NULL, NULL);
/* Array.get_Length: load max_length. */
4298 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4299 int dreg = alloc_ireg (cfg);
4301 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4302 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4303 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4308 } else if (cmethod->klass == runtime_helpers_class) {
4310 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4311 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4315 } else if (cmethod->klass == mono_defaults.thread_class) {
4316 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4317 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4318 MONO_ADD_INS (cfg->cbb, ins);
4320 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4321 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4322 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4325 } else if (cmethod->klass == mono_defaults.monitor_class) {
4326 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4327 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4330 if (COMPILE_LLVM (cfg)) {
4332 * Pass the argument normally, the LLVM backend will handle the
4333 * calling convention problems.
4335 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: pass the object in the dedicated monitor register. */
4337 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4338 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4339 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4340 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4343 return (MonoInst*)call;
4344 } else if (strcmp (cmethod->name, "Exit") == 0) {
4347 if (COMPILE_LLVM (cfg)) {
4348 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4350 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4351 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4352 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4353 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4356 return (MonoInst*)call;
/* IL-based Monitor fast path as an alternative to the register-based one. */
4358 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4359 MonoMethod *fast_method = NULL;
4361 /* Avoid infinite recursion */
4362 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4363 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4364 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4367 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4368 strcmp (cmethod->name, "Exit") == 0)
4369 fast_method = mono_monitor_get_fast_path (cmethod);
4373 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4375 } else if (cmethod->klass->image == mono_defaults.corlib &&
4376 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4377 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4380 #if SIZEOF_REGISTER == 8
4381 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4382 /* 64 bit reads are already atomic */
4383 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4384 ins->dreg = mono_alloc_preg (cfg);
4385 ins->inst_basereg = args [0]->dreg;
4386 ins->inst_offset = 0;
4387 MONO_ADD_INS (cfg->cbb, ins);
4391 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Interlocked.Increment: atomic add of +1. */
4392 if (strcmp (cmethod->name, "Increment") == 0) {
4393 MonoInst *ins_iconst;
4396 if (fsig->params [0]->type == MONO_TYPE_I4)
4397 opcode = OP_ATOMIC_ADD_NEW_I4;
4398 #if SIZEOF_REGISTER == 8
4399 else if (fsig->params [0]->type == MONO_TYPE_I8)
4400 opcode = OP_ATOMIC_ADD_NEW_I8;
4403 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4404 ins_iconst->inst_c0 = 1;
4405 ins_iconst->dreg = mono_alloc_ireg (cfg);
4406 MONO_ADD_INS (cfg->cbb, ins_iconst);
4408 MONO_INST_NEW (cfg, ins, opcode);
4409 ins->dreg = mono_alloc_ireg (cfg);
4410 ins->inst_basereg = args [0]->dreg;
4411 ins->inst_offset = 0;
4412 ins->sreg2 = ins_iconst->dreg;
4413 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4414 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Decrement: atomic add of -1. */
4416 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4417 MonoInst *ins_iconst;
4420 if (fsig->params [0]->type == MONO_TYPE_I4)
4421 opcode = OP_ATOMIC_ADD_NEW_I4;
4422 #if SIZEOF_REGISTER == 8
4423 else if (fsig->params [0]->type == MONO_TYPE_I8)
4424 opcode = OP_ATOMIC_ADD_NEW_I8;
4427 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4428 ins_iconst->inst_c0 = -1;
4429 ins_iconst->dreg = mono_alloc_ireg (cfg);
4430 MONO_ADD_INS (cfg->cbb, ins_iconst);
4432 MONO_INST_NEW (cfg, ins, opcode);
4433 ins->dreg = mono_alloc_ireg (cfg);
4434 ins->inst_basereg = args [0]->dreg;
4435 ins->inst_offset = 0;
4436 ins->sreg2 = ins_iconst->dreg;
4437 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4438 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Add: atomic add of the second argument. */
4440 } else if (strcmp (cmethod->name, "Add") == 0) {
4443 if (fsig->params [0]->type == MONO_TYPE_I4)
4444 opcode = OP_ATOMIC_ADD_NEW_I4;
4445 #if SIZEOF_REGISTER == 8
4446 else if (fsig->params [0]->type == MONO_TYPE_I8)
4447 opcode = OP_ATOMIC_ADD_NEW_I8;
4451 MONO_INST_NEW (cfg, ins, opcode);
4452 ins->dreg = mono_alloc_ireg (cfg);
4453 ins->inst_basereg = args [0]->dreg;
4454 ins->inst_offset = 0;
4455 ins->sreg2 = args [1]->dreg;
4456 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4457 MONO_ADD_INS (cfg->cbb, ins);
4460 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4462 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Interlocked.Exchange over I4/I8/I/object, width-selected per target. */
4463 if (strcmp (cmethod->name, "Exchange") == 0) {
4465 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4467 if (fsig->params [0]->type == MONO_TYPE_I4)
4468 opcode = OP_ATOMIC_EXCHANGE_I4;
4469 #if SIZEOF_REGISTER == 8
4470 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4471 (fsig->params [0]->type == MONO_TYPE_I))
4472 opcode = OP_ATOMIC_EXCHANGE_I8;
4474 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4475 opcode = OP_ATOMIC_EXCHANGE_I4;
4480 MONO_INST_NEW (cfg, ins, opcode);
4481 ins->dreg = mono_alloc_ireg (cfg);
4482 ins->inst_basereg = args [0]->dreg;
4483 ins->inst_offset = 0;
4484 ins->sreg2 = args [1]->dreg;
4485 MONO_ADD_INS (cfg->cbb, ins);
4487 switch (fsig->params [0]->type) {
4489 ins->type = STACK_I4;
4493 ins->type = STACK_I8;
4495 case MONO_TYPE_OBJECT:
4496 ins->type = STACK_OBJ;
4499 g_assert_not_reached ();
/* Storing an object reference may require a GC write barrier. */
4502 if (cfg->gen_write_barriers && is_ref)
4503 emit_write_barrier (cfg, args [0], args [1], -1);
4505 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4507 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
/* Interlocked.CompareExchange: select I4 or I8 CAS by operand size. */
4508 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4510 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4511 if (fsig->params [1]->type == MONO_TYPE_I4)
4513 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4514 size = sizeof (gpointer);
4515 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4518 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4519 ins->dreg = alloc_ireg (cfg);
4520 ins->sreg1 = args [0]->dreg;
4521 ins->sreg2 = args [1]->dreg;
4522 ins->sreg3 = args [2]->dreg;
4523 ins->type = STACK_I4;
4524 MONO_ADD_INS (cfg->cbb, ins);
4525 } else if (size == 8) {
4526 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4527 ins->dreg = alloc_ireg (cfg);
4528 ins->sreg1 = args [0]->dreg;
4529 ins->sreg2 = args [1]->dreg;
4530 ins->sreg3 = args [2]->dreg;
4531 ins->type = STACK_I8;
4532 MONO_ADD_INS (cfg->cbb, ins);
4534 /* g_assert_not_reached (); */
4536 if (cfg->gen_write_barriers && is_ref)
4537 emit_write_barrier (cfg, args [0], args [1], -1);
4539 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- other corlib methods: Debugger.Break, Environment ---- */
4543 } else if (cmethod->klass->image == mono_defaults.corlib) {
4544 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4545 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4546 if (should_insert_brekpoint (cfg->method))
4547 MONO_INST_NEW (cfg, ins, OP_BREAK);
4549 MONO_INST_NEW (cfg, ins, OP_NOP);
4550 MONO_ADD_INS (cfg->cbb, ins);
4553 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4554 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Folded to a compile-time constant per target platform. */
4556 EMIT_NEW_ICONST (cfg, ins, 1);
4558 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math / SIMD ---- */
4562 } else if (cmethod->klass == mono_defaults.math_class) {
4564 * There is general branches code for Min/Max, but it does not work for
4566 * http://everything2.com/?node_id=1051618
4570 #ifdef MONO_ARCH_SIMD_INTRINSICS
4571 if (cfg->opt & MONO_OPT_SIMD) {
4572 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to the architecture-specific intrinsics hook. */
4578 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4582 * This entry point could be used later for arbitrary method
4585 inline static MonoInst*
4586 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4587 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4589 if (method->klass == mono_defaults.string_class) {
4590 /* managed string allocation support */
4591 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4592 MonoInst *iargs [2];
4593 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4594 MonoMethod *managed_alloc = NULL;
4596 g_assert (vtable); /*Should not fail since it System.String*/
4597 #ifndef MONO_CROSS_COMPILE
4598 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4602 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4603 iargs [1] = args [0];
4604 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4611 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4613 MonoInst *store, *temp;
4616 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4617 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4620 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4621 * would be different than the MonoInst's used to represent arguments, and
4622 * the ldelema implementation can't deal with that.
4623 * Solution: When ldelema is used on an inline argument, create a var for
4624 * it, emit ldelema on that var, and emit the saving code below in
4625 * inline_method () if needed.
4627 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4628 cfg->args [i] = temp;
4629 /* This uses cfg->args [i] which is set by the preceeding line */
4630 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4631 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when enabled, the MONO_INLINE_CALLED_METHOD_NAME_LIMIT and
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variables restrict
 * inlining to methods whose full name starts with the given prefix.
 */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
4641 check_inline_called_method_name_limit (MonoMethod *called_method)
4644 static char *limit = NULL;
4646 if (limit == NULL) {
4647 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4649 if (limit_string != NULL)
4650 limit = limit_string;
4652 limit = (char *) "";
4655 if (limit [0] != '\0') {
4656 char *called_method_name = mono_method_full_name (called_method, TRUE);
4658 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4659 g_free (called_method_name);
4661 //return (strncmp_result <= 0);
4662 return (strncmp_result == 0);
4669 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4671 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4674 static char *limit = NULL;
4676 if (limit == NULL) {
4677 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4678 if (limit_string != NULL) {
4679 limit = limit_string;
4681 limit = (char *) "";
4685 if (limit [0] != '\0') {
4686 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4688 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4689 g_free (caller_method_name);
4691 //return (strncmp_result <= 0);
4692 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the caller at IP: the callee is translated with
 * mono_method_to_ir () into fresh bblocks which are then linked into the
 * caller's graph when the inline cost is acceptable (or inlining is
 * forced), and discarded otherwise. All cfg state mutated by the recursive
 * translation is saved up front and restored afterwards.
 *
 * NOTE(review): this extract is missing a number of original lines
 * (declarations such as 'costs'/'i', several braces, #endifs and early
 * returns), so the text below is not self-contained — consult the full
 * file before editing.
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

	/* Optional env-var driven name filters, for debugging the inliner. */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	/* Header load failed: free what we have and bail out of the inline. */
	if (cheader == NULL || mono_loader_get_last_error ()) {
		mono_metadata_free_mh (cheader);
		mono_loader_clear_error ();

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save every piece of cfg state the recursive translation mutates. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	/* Recursively translate the callee's IL into sbblock..ebblock. */
	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	ret_var_set = cfg->ret_var_set;

	/* Restore the saved cfg state. */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* 60 is an empirical cost threshold; a negative cost means the
	 * translation failed or aborted. */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);

			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;

		/*
		 * If the inlined method contains only a throw, then the ret var is not
		 * set, so set it to a dummy value.
		 */
			static double r8_0 = 0.0;

			switch (rvar->type) {
				MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
				MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
				MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
				MONO_INST_NEW (cfg, ins, OP_R8CONST);
				ins->type = STACK_R8;
				ins->inst_p0 = (void*)&r8_0;
				ins->dreg = rvar->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
				MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
				g_assert_not_reached ();

			/* Push the (possibly dummy) return value for the caller. */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);

		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	/* Inline aborted: reset the error state and drop the new bblocks. */
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4897 * Some of these comments may well be out-of-date.
4898 * Design decisions: we do a single pass over the IL code (and we do bblock
4899 * splitting/merging in the few cases when it's required: a back jump to an IL
4900 * address that was not already seen as bblock starting point).
4901 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4902 * Complex operations are decomposed in simpler ones right away. We need to let the
4903 * arch-specific code peek and poke inside this process somehow (except when the
4904 * optimizations can take advantage of the full semantic info of coarse opcodes).
4905 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4906 * MonoInst->opcode initially is the IL opcode or some simplification of that
4907 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4908 * opcode with value bigger than OP_LAST.
4909 * At this point the IR can be handed over to an interpreter, a dumb code generator
4910 * or to the optimizing code generator that will translate it to SSA form.
4912 * Profiling directed optimizations.
4913 * We may compile by default with few or no optimizations and instrument the code
4914 * or the user may indicate what methods to optimize the most either in a config file
4915 * or through repeated runs where the compiler applies offline the optimizations to
4916 * each method and then decides if it was worth it.
/*
 * Verification helpers used throughout mono_method_to_ir (); each one
 * bails out through the UNVERIFIED/load_error paths when its condition
 * fails, so they rely on labels defined in the enclosing function.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4932 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4934 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4936 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a basic block (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch. Also marks the bblock containing a CEE_THROW as out-of-line so
 * it can be laid out as cold code.
 * NOTE(review): this extract omits several original lines (ip advances,
 * 'break' statements and some case labels) — consult the full file before
 * editing.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);

		opcode = &mono_opcodes [i];
		/* Advance over the operand according to the opcode's operand kind. */
		switch (opcode->argument) {
		case MonoInlineNone:
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoShortInlineR:
		case MonoShortInlineVar:
		case MonoShortInlineI:
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the following instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineBrTarget:
			/* 4-byte signed displacement, relative to the following instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			/* switch targets are relative to the end of the n-entry jump table */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
			g_assert_not_reached ();

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				/* Mark the throwing bblock so it can be moved out of line. */
				bblock->out_of_line = 1;
5032 static inline MonoMethod *
5033 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5037 if (m->wrapper_type != MONO_WRAPPER_NONE)
5038 return mono_method_get_wrapper_data (m, token);
5040 method = mono_get_method_full (m->klass->image, token, klass, context);
5045 static inline MonoMethod *
5046 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5048 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5050 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5056 static inline MonoClass*
5057 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5061 if (method->wrapper_type != MONO_WRAPPER_NONE)
5062 klass = mono_method_get_wrapper_data (method, token);
5064 klass = mono_class_get_full (method->klass->image, token, context);
5066 mono_class_init (klass);
5071 * Returns TRUE if the JIT should abort inlining because "callee"
5072 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Perform the declarative-security linkdemand check for a call from
 * CALLER to CALLEE. For ECMA demands, code throwing a SecurityException is
 * emitted before the call; otherwise a failed demand records
 * MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg (without clobbering a
 * previously recorded exception). Returns TRUE when the JIT should abort
 * inlining because of security attributes.
 * NOTE(review): this extract omits several lines (declarations, returns,
 * braces) — consult the full file before editing.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
5106 throw_exception (void)
5108 static MonoMethod *method = NULL;
5111 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5112 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5119 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5121 MonoMethod *thrower = throw_exception ();
5124 EMIT_NEW_PCONST (cfg, args [0], ex);
5125 mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
5130 * the custom attributes from the original method.
5133 get_original_method (MonoMethod *method)
5135 if (method->wrapper_type == MONO_WRAPPER_NONE)
5138 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5139 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5142 /* in other cases we need to find the original method */
5143 return mono_marshal_method_from_wrapper (method);
5147 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5148 MonoBasicBlock *bblock, unsigned char *ip)
5150 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5151 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5153 emit_throw_exception (cfg, ex);
5157 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5158 MonoBasicBlock *bblock, unsigned char *ip)
5160 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5161 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5163 emit_throw_exception (cfg, ex);
5167 * Check that the IL instructions at ip are the array initialization
5168 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Check that the IL at IP is the standard array-initialization sequence
 * and, if so, return a pointer to the static field data (for AOT, the RVA
 * wrapped with GUINT_TO_POINTER) together with its size and the field
 * token. Expected IL shape:
 *
 *	newarr[System.Int32]
 *	dup
 *	ldtoken field valuetype ...
 *	call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 *
 * NOTE(review): this extract omits parts of the original (the per-element
 * size table, several returns and closing braces) — consult the full file
 * before editing.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/* ip [5] == 0x4 checks the ldtoken operand is a Field token (0x04xxxxxx). */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* Only corlib's RuntimeHelpers.InitializeArray qualifies. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		/* The blob must cover the field's declared size. */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			data_ptr = mono_field_get_data (field);
5251 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5253 char *method_fname = mono_method_full_name (method, TRUE);
5255 MonoMethodHeader *header = mono_method_get_header (method);
5257 if (header->code_size == 0)
5258 method_code = g_strdup ("method body is empty.");
5260 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5261 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5262 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5263 g_free (method_fname);
5264 g_free (method_code);
5265 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5269 set_exception_object (MonoCompile *cfg, MonoException *exception)
5271 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5272 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5273 cfg->exception_ptr = exception;
5277 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5281 if (cfg->generic_sharing_context)
5282 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5284 type = &klass->byval_arg;
5285 return MONO_TYPE_IS_REFERENCE (type);
5289 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5292 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5293 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5294 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5295 /* Optimize reg-reg moves away */
5297 * Can't optimize other opcodes, since sp[0] might point to
5298 * the last ins of a decomposed opcode.
5300 sp [0]->dreg = (cfg)->locals [n]->dreg;
5302 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5307 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so when the address is consumed by
 * an immediately following initobj, emit the equivalent direct
 * store/zeroing of the local instead and return the IP past the consumed
 * instructions; NULL means the optimization did not apply.
 * NOTE(review): this extract omits parts of the original (local/token
 * declarations, the returns and closing braces) — consult the full file
 * before editing.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	local = read16 (ip + 2);

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		/* initobj on a reference-typed local is just storing NULL */
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			/* value types are zeroed in place */
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5349 is_exception_class (MonoClass *class)
5352 if (class == mono_defaults.exception_class)
5354 class = class->parent;
5360 * mono_method_to_ir:
5362 * Translate the .net IL into linear IR.
5365 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5366 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5367 guint inline_offset, gboolean is_virtual_call)
5370 MonoInst *ins, **sp, **stack_start;
5371 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5372 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5373 MonoMethod *cmethod, *method_definition;
5374 MonoInst **arg_array;
5375 MonoMethodHeader *header;
5377 guint32 token, ins_flag;
5379 MonoClass *constrained_call = NULL;
5380 unsigned char *ip, *end, *target, *err_pos;
5381 static double r8_0 = 0.0;
5382 MonoMethodSignature *sig;
5383 MonoGenericContext *generic_context = NULL;
5384 MonoGenericContainer *generic_container = NULL;
5385 MonoType **param_types;
5386 int i, n, start_new_bblock, dreg;
5387 int num_calls = 0, inline_costs = 0;
5388 int breakpoint_id = 0;
5390 MonoBoolean security, pinvoke;
5391 MonoSecurityManager* secman = NULL;
5392 MonoDeclSecurityActions actions;
5393 GSList *class_inits = NULL;
5394 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5396 gboolean init_locals, seq_points, skip_dead_blocks;
5398 /* serialization and xdomain stuff may need access to private fields and methods */
5399 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5400 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5401 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5402 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5403 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5404 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5406 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5408 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5409 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5410 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5411 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5413 image = method->klass->image;
5414 header = mono_method_get_header (method);
5416 MonoLoaderError *error;
5418 if ((error = mono_loader_get_last_error ())) {
5419 cfg->exception_type = error->exception_type;
5421 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5422 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5424 goto exception_exit;
5426 generic_container = mono_method_get_generic_container (method);
5427 sig = mono_method_signature (method);
5428 num_args = sig->hasthis + sig->param_count;
5429 ip = (unsigned char*)header->code;
5430 cfg->cil_start = ip;
5431 end = ip + header->code_size;
5432 mono_jit_stats.cil_code_size += header->code_size;
5433 init_locals = header->init_locals;
5435 seq_points = cfg->gen_seq_points && cfg->method == method;
5438 * Methods without init_locals set could cause asserts in various passes
5443 method_definition = method;
5444 while (method_definition->is_inflated) {
5445 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5446 method_definition = imethod->declaring;
5449 /* SkipVerification is not allowed if core-clr is enabled */
5450 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5452 dont_verify_stloc = TRUE;
5455 if (mono_debug_using_mono_debugger ())
5456 cfg->keep_cil_nops = TRUE;
5458 if (sig->is_inflated)
5459 generic_context = mono_method_get_context (method);
5460 else if (generic_container)
5461 generic_context = &generic_container->context;
5462 cfg->generic_context = generic_context;
5464 if (!cfg->generic_sharing_context)
5465 g_assert (!sig->has_type_parameters);
5467 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5468 g_assert (method->is_inflated);
5469 g_assert (mono_method_get_context (method)->method_inst);
5471 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5472 g_assert (sig->generic_param_count);
5474 if (cfg->method == method) {
5475 cfg->real_offset = 0;
5477 cfg->real_offset = inline_offset;
5480 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5481 cfg->cil_offset_to_bb_len = header->code_size;
5483 cfg->current_method = method;
5485 if (cfg->verbose_level > 2)
5486 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5488 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5490 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5491 for (n = 0; n < sig->param_count; ++n)
5492 param_types [n + sig->hasthis] = sig->params [n];
5493 cfg->arg_types = param_types;
5495 dont_inline = g_list_prepend (dont_inline, method);
5496 if (cfg->method == method) {
5498 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5499 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5502 NEW_BBLOCK (cfg, start_bblock);
5503 cfg->bb_entry = start_bblock;
5504 start_bblock->cil_code = NULL;
5505 start_bblock->cil_length = 0;
5508 NEW_BBLOCK (cfg, end_bblock);
5509 cfg->bb_exit = end_bblock;
5510 end_bblock->cil_code = NULL;
5511 end_bblock->cil_length = 0;
5512 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5513 g_assert (cfg->num_bblocks == 2);
5515 arg_array = cfg->args;
5517 if (header->num_clauses) {
5518 cfg->spvars = g_hash_table_new (NULL, NULL);
5519 cfg->exvars = g_hash_table_new (NULL, NULL);
5521 /* handle exception clauses */
5522 for (i = 0; i < header->num_clauses; ++i) {
5523 MonoBasicBlock *try_bb;
5524 MonoExceptionClause *clause = &header->clauses [i];
5525 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5526 try_bb->real_offset = clause->try_offset;
5527 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5528 tblock->real_offset = clause->handler_offset;
5529 tblock->flags |= BB_EXCEPTION_HANDLER;
5531 link_bblock (cfg, try_bb, tblock);
5533 if (*(ip + clause->handler_offset) == CEE_POP)
5534 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5536 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5537 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5538 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5539 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5540 MONO_ADD_INS (tblock, ins);
5542 /* todo: is a fault block unsafe to optimize? */
5543 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5544 tblock->flags |= BB_EXCEPTION_UNSAFE;
5548 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5550 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5552 /* catch and filter blocks get the exception object on the stack */
5553 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5554 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5555 MonoInst *dummy_use;
5557 /* mostly like handle_stack_args (), but just sets the input args */
5558 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5559 tblock->in_scount = 1;
5560 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5561 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5564 * Add a dummy use for the exvar so its liveness info will be
5568 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5570 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5571 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5572 tblock->flags |= BB_EXCEPTION_HANDLER;
5573 tblock->real_offset = clause->data.filter_offset;
5574 tblock->in_scount = 1;
5575 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5576 /* The filter block shares the exvar with the handler block */
5577 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5578 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5579 MONO_ADD_INS (tblock, ins);
5583 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5584 clause->data.catch_class &&
5585 cfg->generic_sharing_context &&
5586 mono_class_check_context_used (clause->data.catch_class)) {
5588 * In shared generic code with catch
5589 * clauses containing type variables
5590 * the exception handling code has to
5591 * be able to get to the rgctx.
5592 * Therefore we have to make sure that
5593 * the vtable/mrgctx argument (for
5594 * static or generic methods) or the
5595 * "this" argument (for non-static
5596 * methods) are live.
5598 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5599 mini_method_get_context (method)->method_inst ||
5600 method->klass->valuetype) {
5601 mono_get_vtable_var (cfg);
5603 MonoInst *dummy_use;
5605 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5610 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5611 cfg->cbb = start_bblock;
5612 cfg->args = arg_array;
5613 mono_save_args (cfg, sig, inline_args);
5616 /* FIRST CODE BLOCK */
5617 NEW_BBLOCK (cfg, bblock);
5618 bblock->cil_code = ip;
5622 ADD_BBLOCK (cfg, bblock);
5624 if (cfg->method == method) {
5625 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5626 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5627 MONO_INST_NEW (cfg, ins, OP_BREAK);
5628 MONO_ADD_INS (bblock, ins);
5632 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5633 secman = mono_security_manager_get_methods ();
5635 security = (secman && mono_method_has_declsec (method));
5636 /* at this point having security doesn't mean we have any code to generate */
5637 if (security && (cfg->method == method)) {
5638 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5639 * And we do not want to enter the next section (with allocation) if we
5640 * have nothing to generate */
5641 security = mono_declsec_get_demands (method, &actions);
5644 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5645 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5647 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5648 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5649 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5651 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5652 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5656 mono_custom_attrs_free (custom);
5659 custom = mono_custom_attrs_from_class (wrapped->klass);
5660 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5664 mono_custom_attrs_free (custom);
5667 /* not a P/Invoke after all */
5672 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5673 /* we use a separate basic block for the initialization code */
5674 NEW_BBLOCK (cfg, init_localsbb);
5675 cfg->bb_init = init_localsbb;
5676 init_localsbb->real_offset = cfg->real_offset;
5677 start_bblock->next_bb = init_localsbb;
5678 init_localsbb->next_bb = bblock;
5679 link_bblock (cfg, start_bblock, init_localsbb);
5680 link_bblock (cfg, init_localsbb, bblock);
5682 cfg->cbb = init_localsbb;
5684 start_bblock->next_bb = bblock;
5685 link_bblock (cfg, start_bblock, bblock);
5688 /* at this point we know, if security is TRUE, that some code needs to be generated */
5689 if (security && (cfg->method == method)) {
5692 mono_jit_stats.cas_demand_generation++;
5694 if (actions.demand.blob) {
5695 /* Add code for SecurityAction.Demand */
5696 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5697 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5698 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5699 mono_emit_method_call (cfg, secman->demand, args, NULL);
5701 if (actions.noncasdemand.blob) {
5702 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5703 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5704 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5705 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5706 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5707 mono_emit_method_call (cfg, secman->demand, args, NULL);
5709 if (actions.demandchoice.blob) {
5710 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5711 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5712 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5713 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5714 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5718 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5720 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5723 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5724 /* check if this is native code, e.g. an icall or a p/invoke */
5725 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5726 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5728 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5729 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5731 /* if this ia a native call then it can only be JITted from platform code */
5732 if ((icall || pinvk) && method->klass && method->klass->image) {
5733 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5734 MonoException *ex = icall ? mono_get_exception_security () :
5735 mono_get_exception_method_access ();
5736 emit_throw_exception (cfg, ex);
5743 if (header->code_size == 0)
5746 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5751 if (cfg->method == method)
5752 mono_debug_init_method (cfg, bblock, breakpoint_id);
5754 for (n = 0; n < header->num_locals; ++n) {
5755 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5760 /* We force the vtable variable here for all shared methods
5761 for the possibility that they might show up in a stack
5762 trace where their exact instantiation is needed. */
5763 if (cfg->generic_sharing_context && method == cfg->method) {
5764 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5765 mini_method_get_context (method)->method_inst ||
5766 method->klass->valuetype) {
5767 mono_get_vtable_var (cfg);
5769 /* FIXME: Is there a better way to do this?
5770 We need the variable live for the duration
5771 of the whole method. */
5772 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5776 /* add a check for this != NULL to inlined methods */
5777 if (is_virtual_call) {
5780 NEW_ARGLOAD (cfg, arg_ins, 0);
5781 MONO_ADD_INS (cfg->cbb, arg_ins);
5782 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5785 skip_dead_blocks = !dont_verify;
5786 if (skip_dead_blocks) {
5787 original_bb = bb = mono_basic_block_split (method, &error);
5788 if (!mono_error_ok (&error)) {
5789 mono_error_cleanup (&error);
5795 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5796 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5799 start_new_bblock = 0;
5802 if (cfg->method == method)
5803 cfg->real_offset = ip - header->code;
5805 cfg->real_offset = inline_offset;
5810 if (start_new_bblock) {
5811 bblock->cil_length = ip - bblock->cil_code;
5812 if (start_new_bblock == 2) {
5813 g_assert (ip == tblock->cil_code);
5815 GET_BBLOCK (cfg, tblock, ip);
5817 bblock->next_bb = tblock;
5820 start_new_bblock = 0;
5821 for (i = 0; i < bblock->in_scount; ++i) {
5822 if (cfg->verbose_level > 3)
5823 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5824 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5828 g_slist_free (class_inits);
5831 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5832 link_bblock (cfg, bblock, tblock);
5833 if (sp != stack_start) {
5834 handle_stack_args (cfg, stack_start, sp - stack_start);
5836 CHECK_UNVERIFIABLE (cfg);
5838 bblock->next_bb = tblock;
5841 for (i = 0; i < bblock->in_scount; ++i) {
5842 if (cfg->verbose_level > 3)
5843 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5844 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5847 g_slist_free (class_inits);
5852 if (skip_dead_blocks) {
5853 int ip_offset = ip - header->code;
5855 if (ip_offset == bb->end)
5859 int op_size = mono_opcode_size (ip, end);
5860 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5862 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5864 if (ip_offset + op_size == bb->end) {
5865 MONO_INST_NEW (cfg, ins, OP_NOP);
5866 MONO_ADD_INS (bblock, ins);
5867 start_new_bblock = 1;
5875 * Sequence points are points where the debugger can place a breakpoint.
5876 * Currently, we generate these automatically at points where the IL
5879 if (seq_points && sp == stack_start) {
5880 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5881 MONO_ADD_INS (cfg->cbb, ins);
5884 bblock->real_offset = cfg->real_offset;
5886 if ((cfg->method == method) && cfg->coverage_info) {
5887 guint32 cil_offset = ip - header->code;
5888 cfg->coverage_info->data [cil_offset].cil_code = ip;
5890 /* TODO: Use an increment here */
5891 #if defined(TARGET_X86)
5892 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5893 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5895 MONO_ADD_INS (cfg->cbb, ins);
5897 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5898 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5902 if (cfg->verbose_level > 3)
5903 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5907 if (cfg->keep_cil_nops)
5908 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5910 MONO_INST_NEW (cfg, ins, OP_NOP);
5912 MONO_ADD_INS (bblock, ins);
5915 if (should_insert_brekpoint (cfg->method))
5916 MONO_INST_NEW (cfg, ins, OP_BREAK);
5918 MONO_INST_NEW (cfg, ins, OP_NOP);
5920 MONO_ADD_INS (bblock, ins);
5926 CHECK_STACK_OVF (1);
5927 n = (*ip)-CEE_LDARG_0;
5929 EMIT_NEW_ARGLOAD (cfg, ins, n);
5937 CHECK_STACK_OVF (1);
5938 n = (*ip)-CEE_LDLOC_0;
5940 EMIT_NEW_LOCLOAD (cfg, ins, n);
5949 n = (*ip)-CEE_STLOC_0;
5952 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5954 emit_stloc_ir (cfg, sp, header, n);
5961 CHECK_STACK_OVF (1);
5964 EMIT_NEW_ARGLOAD (cfg, ins, n);
5970 CHECK_STACK_OVF (1);
5973 NEW_ARGLOADA (cfg, ins, n);
5974 MONO_ADD_INS (cfg->cbb, ins);
5984 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5986 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5991 CHECK_STACK_OVF (1);
5994 EMIT_NEW_LOCLOAD (cfg, ins, n);
5998 case CEE_LDLOCA_S: {
5999 unsigned char *tmp_ip;
6001 CHECK_STACK_OVF (1);
6002 CHECK_LOCAL (ip [1]);
6004 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6010 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6019 CHECK_LOCAL (ip [1]);
6020 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6022 emit_stloc_ir (cfg, sp, header, ip [1]);
6027 CHECK_STACK_OVF (1);
6028 EMIT_NEW_PCONST (cfg, ins, NULL);
6029 ins->type = STACK_OBJ;
6034 CHECK_STACK_OVF (1);
6035 EMIT_NEW_ICONST (cfg, ins, -1);
6048 CHECK_STACK_OVF (1);
6049 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6055 CHECK_STACK_OVF (1);
6057 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6063 CHECK_STACK_OVF (1);
6064 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6070 CHECK_STACK_OVF (1);
6071 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6072 ins->type = STACK_I8;
6073 ins->dreg = alloc_dreg (cfg, STACK_I8);
6075 ins->inst_l = (gint64)read64 (ip);
6076 MONO_ADD_INS (bblock, ins);
6082 gboolean use_aotconst = FALSE;
6084 #ifdef TARGET_POWERPC
6085 /* FIXME: Clean this up */
6086 if (cfg->compile_aot)
6087 use_aotconst = TRUE;
6090 /* FIXME: we should really allocate this only late in the compilation process */
6091 f = mono_domain_alloc (cfg->domain, sizeof (float));
6093 CHECK_STACK_OVF (1);
6099 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6101 dreg = alloc_freg (cfg);
6102 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6103 ins->type = STACK_R8;
6105 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6106 ins->type = STACK_R8;
6107 ins->dreg = alloc_dreg (cfg, STACK_R8);
6109 MONO_ADD_INS (bblock, ins);
6119 gboolean use_aotconst = FALSE;
6121 #ifdef TARGET_POWERPC
6122 /* FIXME: Clean this up */
6123 if (cfg->compile_aot)
6124 use_aotconst = TRUE;
6127 /* FIXME: we should really allocate this only late in the compilation process */
6128 d = mono_domain_alloc (cfg->domain, sizeof (double));
6130 CHECK_STACK_OVF (1);
6136 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6138 dreg = alloc_freg (cfg);
6139 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6140 ins->type = STACK_R8;
6142 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6143 ins->type = STACK_R8;
6144 ins->dreg = alloc_dreg (cfg, STACK_R8);
6146 MONO_ADD_INS (bblock, ins);
6155 MonoInst *temp, *store;
6157 CHECK_STACK_OVF (1);
6161 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6162 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6164 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6167 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6180 if (sp [0]->type == STACK_R8)
6181 /* we need to pop the value from the x86 FP stack */
6182 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6191 if (stack_start != sp)
6193 token = read32 (ip + 1);
6194 /* FIXME: check the signature matches */
6195 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6197 if (!cmethod || mono_loader_get_last_error ())
6200 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6201 GENERIC_SHARING_FAILURE (CEE_JMP);
6203 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6204 CHECK_CFG_EXCEPTION;
6206 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6208 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6211 /* Handle tail calls similarly to calls */
6212 n = fsig->param_count + fsig->hasthis;
6214 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6215 call->method = cmethod;
6216 call->tail_call = TRUE;
6217 call->signature = mono_method_signature (cmethod);
6218 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6219 call->inst.inst_p0 = cmethod;
6220 for (i = 0; i < n; ++i)
6221 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6223 mono_arch_emit_call (cfg, call);
6224 MONO_ADD_INS (bblock, (MonoInst*)call);
6227 for (i = 0; i < num_args; ++i)
6228 /* Prevent arguments from being optimized away */
6229 arg_array [i]->flags |= MONO_INST_VOLATILE;
6231 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6232 ins = (MonoInst*)call;
6233 ins->inst_p0 = cmethod;
6234 MONO_ADD_INS (bblock, ins);
6238 start_new_bblock = 1;
6243 case CEE_CALLVIRT: {
6244 MonoInst *addr = NULL;
6245 MonoMethodSignature *fsig = NULL;
6247 int virtual = *ip == CEE_CALLVIRT;
6248 int calli = *ip == CEE_CALLI;
6249 gboolean pass_imt_from_rgctx = FALSE;
6250 MonoInst *imt_arg = NULL;
6251 gboolean pass_vtable = FALSE;
6252 gboolean pass_mrgctx = FALSE;
6253 MonoInst *vtable_arg = NULL;
6254 gboolean check_this = FALSE;
6255 gboolean supported_tail_call = FALSE;
6258 token = read32 (ip + 1);
6265 if (method->wrapper_type != MONO_WRAPPER_NONE)
6266 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6268 fsig = mono_metadata_parse_signature (image, token);
6270 n = fsig->param_count + fsig->hasthis;
6272 if (method->dynamic && fsig->pinvoke) {
6276 * This is a call through a function pointer using a pinvoke
6277 * signature. Have to create a wrapper and call that instead.
6278 * FIXME: This is very slow, need to create a wrapper at JIT time
6279 * instead based on the signature.
6281 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6282 EMIT_NEW_PCONST (cfg, args [1], fsig);
6284 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6287 MonoMethod *cil_method;
6289 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6290 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6291 cil_method = cmethod;
6292 } else if (constrained_call) {
6293 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6295 * This is needed since get_method_constrained can't find
6296 * the method in klass representing a type var.
6297 * The type var is guaranteed to be a reference type in this
6300 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6301 cil_method = cmethod;
6302 g_assert (!cmethod->klass->valuetype);
6304 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6307 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6308 cil_method = cmethod;
6311 if (!cmethod || mono_loader_get_last_error ())
6313 if (!dont_verify && !cfg->skip_visibility) {
6314 MonoMethod *target_method = cil_method;
6315 if (method->is_inflated) {
6316 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6318 if (!mono_method_can_access_method (method_definition, target_method) &&
6319 !mono_method_can_access_method (method, cil_method))
6320 METHOD_ACCESS_FAILURE;
6323 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6324 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6326 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6327 /* MS.NET seems to silently convert this to a callvirt */
6332 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6333 * converts to a callvirt.
6335 * tests/bug-515884.il is an example of this behavior
6337 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6338 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6339 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6343 if (!cmethod->klass->inited)
6344 if (!mono_class_init (cmethod->klass))
6347 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6348 mini_class_is_system_array (cmethod->klass)) {
6349 array_rank = cmethod->klass->rank;
6350 fsig = mono_method_signature (cmethod);
6352 fsig = mono_method_signature (cmethod);
6357 if (fsig->pinvoke) {
6358 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6359 check_for_pending_exc, FALSE);
6360 fsig = mono_method_signature (wrapper);
6361 } else if (constrained_call) {
6362 fsig = mono_method_signature (cmethod);
6364 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6368 mono_save_token_info (cfg, image, token, cil_method);
6370 n = fsig->param_count + fsig->hasthis;
6372 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6373 if (check_linkdemand (cfg, method, cmethod))
6375 CHECK_CFG_EXCEPTION;
6378 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6379 g_assert_not_reached ();
6382 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6385 if (!cfg->generic_sharing_context && cmethod)
6386 g_assert (!mono_method_check_context_used (cmethod));
6390 //g_assert (!virtual || fsig->hasthis);
6394 if (constrained_call) {
6396 * We have the `constrained.' prefix opcode.
6398 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6400 * The type parameter is instantiated as a valuetype,
6401 * but that type doesn't override the method we're
6402 * calling, so we need to box `this'.
6404 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6405 ins->klass = constrained_call;
6406 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6407 CHECK_CFG_EXCEPTION;
6408 } else if (!constrained_call->valuetype) {
6409 int dreg = alloc_preg (cfg);
6412 * The type parameter is instantiated as a reference
6413 * type. We have a managed pointer on the stack, so
6414 * we need to dereference it here.
6416 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6417 ins->type = STACK_OBJ;
6419 } else if (cmethod->klass->valuetype)
6421 constrained_call = NULL;
6424 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6428 * If the callee is a shared method, then its static cctor
6429 * might not get called after the call was patched.
6431 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6432 emit_generic_class_init (cfg, cmethod->klass);
6433 CHECK_TYPELOAD (cmethod->klass);
6436 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6437 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6438 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6439 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6440 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6443 * Pass vtable iff target method might
6444 * be shared, which means that sharing
6445 * is enabled for its class and its
6446 * context is sharable (and it's not a
6449 if (sharing_enabled && context_sharable &&
6450 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6454 if (cmethod && mini_method_get_context (cmethod) &&
6455 mini_method_get_context (cmethod)->method_inst) {
6456 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6457 MonoGenericContext *context = mini_method_get_context (cmethod);
6458 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6460 g_assert (!pass_vtable);
6462 if (sharing_enabled && context_sharable)
6466 if (cfg->generic_sharing_context && cmethod) {
6467 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6469 context_used = mono_method_check_context_used (cmethod);
6471 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6472 /* Generic method interface
6473 calls are resolved via a
6474 helper function and don't
6476 if (!cmethod_context || !cmethod_context->method_inst)
6477 pass_imt_from_rgctx = TRUE;
6481 * If a shared method calls another
6482 * shared method then the caller must
6483 * have a generic sharing context
6484 * because the magic trampoline
6485 * requires it. FIXME: We shouldn't
6486 * have to force the vtable/mrgctx
6487 * variable here. Instead there
6488 * should be a flag in the cfg to
6489 * request a generic sharing context.
6492 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6493 mono_get_vtable_var (cfg);
6498 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6500 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6502 CHECK_TYPELOAD (cmethod->klass);
6503 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6508 g_assert (!vtable_arg);
6510 if (!cfg->compile_aot) {
6512 * emit_get_rgctx_method () calls mono_class_vtable () so check
6513 * for type load errors before.
6515 mono_class_setup_vtable (cmethod->klass);
6516 CHECK_TYPELOAD (cmethod->klass);
6519 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6521 /* !marshalbyref is needed to properly handle generic methods + remoting */
6522 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6523 MONO_METHOD_IS_FINAL (cmethod)) &&
6524 !cmethod->klass->marshalbyref) {
6531 if (pass_imt_from_rgctx) {
6532 g_assert (!pass_vtable);
6535 imt_arg = emit_get_rgctx_method (cfg, context_used,
6536 cmethod, MONO_RGCTX_INFO_METHOD);
6540 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6542 /* Calling virtual generic methods */
6543 if (cmethod && virtual &&
6544 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6545 !(MONO_METHOD_IS_FINAL (cmethod) &&
6546 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6547 mono_method_signature (cmethod)->generic_param_count) {
6548 MonoInst *this_temp, *this_arg_temp, *store;
6549 MonoInst *iargs [4];
6551 g_assert (mono_method_signature (cmethod)->is_inflated);
6553 /* Prevent inlining of methods that contain indirect calls */
6556 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6557 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6558 g_assert (!imt_arg);
6560 g_assert (cmethod->is_inflated);
6561 imt_arg = emit_get_rgctx_method (cfg, context_used,
6562 cmethod, MONO_RGCTX_INFO_METHOD);
6563 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6567 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6568 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6569 MONO_ADD_INS (bblock, store);
6571 /* FIXME: This should be a managed pointer */
6572 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6574 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6575 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6576 cmethod, MONO_RGCTX_INFO_METHOD);
6577 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6578 addr = mono_emit_jit_icall (cfg,
6579 mono_helper_compile_generic_method, iargs);
6581 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6583 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6586 if (!MONO_TYPE_IS_VOID (fsig->ret))
6587 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6589 CHECK_CFG_EXCEPTION;
6596 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6597 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6599 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6603 /* FIXME: runtime generic context pointer for jumps? */
6604 /* FIXME: handle this for generic sharing eventually */
6605 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6608 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6611 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6612 /* Handle tail calls similarly to calls */
6613 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6615 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6616 call->tail_call = TRUE;
6617 call->method = cmethod;
6618 call->signature = mono_method_signature (cmethod);
6621 * We implement tail calls by storing the actual arguments into the
6622 * argument variables, then emitting a CEE_JMP.
6624 for (i = 0; i < n; ++i) {
6625 /* Prevent argument from being register allocated */
6626 arg_array [i]->flags |= MONO_INST_VOLATILE;
6627 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6631 ins = (MonoInst*)call;
6632 ins->inst_p0 = cmethod;
6633 ins->inst_p1 = arg_array [0];
6634 MONO_ADD_INS (bblock, ins);
6635 link_bblock (cfg, bblock, end_bblock);
6636 start_new_bblock = 1;
6638 CHECK_CFG_EXCEPTION;
6640 /* skip CEE_RET as well */
6646 /* Conversion to a JIT intrinsic */
6647 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6649 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6650 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6655 CHECK_CFG_EXCEPTION;
6663 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6664 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6665 mono_method_check_inlining (cfg, cmethod) &&
6666 !g_list_find (dont_inline, cmethod)) {
6668 gboolean allways = FALSE;
6670 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6671 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6672 /* Prevent inlining of methods that call wrappers */
6674 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6678 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6680 cfg->real_offset += 5;
6683 if (!MONO_TYPE_IS_VOID (fsig->ret))
6684 /* *sp is already set by inline_method */
6687 inline_costs += costs;
6693 inline_costs += 10 * num_calls++;
6695 /* Tail recursion elimination */
6696 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6697 gboolean has_vtargs = FALSE;
6700 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6703 /* keep it simple */
6704 for (i = fsig->param_count - 1; i >= 0; i--) {
6705 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6710 for (i = 0; i < n; ++i)
6711 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6712 MONO_INST_NEW (cfg, ins, OP_BR);
6713 MONO_ADD_INS (bblock, ins);
6714 tblock = start_bblock->out_bb [0];
6715 link_bblock (cfg, bblock, tblock);
6716 ins->inst_target_bb = tblock;
6717 start_new_bblock = 1;
6719 /* skip the CEE_RET, too */
6720 if (ip_in_bb (cfg, bblock, ip + 5))
6730 /* Generic sharing */
6731 /* FIXME: only do this for generic methods if
6732 they are not shared! */
6733 if (context_used && !imt_arg && !array_rank &&
6734 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6735 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6736 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6737 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6740 g_assert (cfg->generic_sharing_context && cmethod);
6744 * We are compiling a call to a
6745 * generic method from shared code,
6746 * which means that we have to look up
6747 * the method in the rgctx and do an
6750 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6753 /* Indirect calls */
6755 g_assert (!imt_arg);
6757 if (*ip == CEE_CALL)
6758 g_assert (context_used);
6759 else if (*ip == CEE_CALLI)
6760 g_assert (!vtable_arg);
6762 /* FIXME: what the hell is this??? */
6763 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6764 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6766 /* Prevent inlining of methods with indirect calls */
6771 int rgctx_reg = mono_alloc_preg (cfg);
6773 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6774 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6775 call = (MonoCallInst*)ins;
6776 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6778 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6780 * Instead of emitting an indirect call, emit a direct call
6781 * with the contents of the aotconst as the patch info.
6783 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6785 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6786 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6789 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6792 if (!MONO_TYPE_IS_VOID (fsig->ret))
6793 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6795 CHECK_CFG_EXCEPTION;
6806 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6807 if (sp [fsig->param_count]->type == STACK_OBJ) {
6808 MonoInst *iargs [2];
6811 iargs [1] = sp [fsig->param_count];
6813 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6816 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6817 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6818 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6819 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6821 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6824 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6825 if (!cmethod->klass->element_class->valuetype && !readonly)
6826 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6827 CHECK_TYPELOAD (cmethod->klass);
6830 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6833 g_assert_not_reached ();
6836 CHECK_CFG_EXCEPTION;
6843 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6845 if (!MONO_TYPE_IS_VOID (fsig->ret))
6846 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6848 CHECK_CFG_EXCEPTION;
6858 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6860 } else if (imt_arg) {
6861 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6863 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6866 if (!MONO_TYPE_IS_VOID (fsig->ret))
6867 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6869 CHECK_CFG_EXCEPTION;
6876 if (cfg->method != method) {
6877 /* return from inlined method */
6879 * If in_count == 0, that means the ret is unreachable due to
6880 * being preceded by a throw. In that case, inline_method () will
6881 * handle setting the return value
6882 * (test case: test_0_inline_throw ()).
6884 if (return_var && cfg->cbb->in_count) {
6888 //g_assert (returnvar != -1);
6889 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6890 cfg->ret_var_set = TRUE;
6894 MonoType *ret_type = mono_method_signature (method)->ret;
6898 * Place a seq point here too even though the IL stack is not
6899 * empty, so a step over on
6902 * will work correctly.
6904 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6905 MONO_ADD_INS (cfg->cbb, ins);
6908 g_assert (!return_var);
6911 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6914 if (!cfg->vret_addr) {
6917 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6919 EMIT_NEW_RETLOADA (cfg, ret_addr);
6921 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6922 ins->klass = mono_class_from_mono_type (ret_type);
6925 #ifdef MONO_ARCH_SOFT_FLOAT
6926 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6927 MonoInst *iargs [1];
6931 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6932 mono_arch_emit_setret (cfg, method, conv);
6934 mono_arch_emit_setret (cfg, method, *sp);
6937 mono_arch_emit_setret (cfg, method, *sp);
6942 if (sp != stack_start)
6944 MONO_INST_NEW (cfg, ins, OP_BR);
6946 ins->inst_target_bb = end_bblock;
6947 MONO_ADD_INS (bblock, ins);
6948 link_bblock (cfg, bblock, end_bblock);
6949 start_new_bblock = 1;
6953 MONO_INST_NEW (cfg, ins, OP_BR);
6955 target = ip + 1 + (signed char)(*ip);
6957 GET_BBLOCK (cfg, tblock, target);
6958 link_bblock (cfg, bblock, tblock);
6959 ins->inst_target_bb = tblock;
6960 if (sp != stack_start) {
6961 handle_stack_args (cfg, stack_start, sp - stack_start);
6963 CHECK_UNVERIFIABLE (cfg);
6965 MONO_ADD_INS (bblock, ins);
6966 start_new_bblock = 1;
6967 inline_costs += BRANCH_COST;
6981 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6983 target = ip + 1 + *(signed char*)ip;
6989 inline_costs += BRANCH_COST;
6993 MONO_INST_NEW (cfg, ins, OP_BR);
6996 target = ip + 4 + (gint32)read32(ip);
6998 GET_BBLOCK (cfg, tblock, target);
6999 link_bblock (cfg, bblock, tblock);
7000 ins->inst_target_bb = tblock;
7001 if (sp != stack_start) {
7002 handle_stack_args (cfg, stack_start, sp - stack_start);
7004 CHECK_UNVERIFIABLE (cfg);
7007 MONO_ADD_INS (bblock, ins);
7009 start_new_bblock = 1;
7010 inline_costs += BRANCH_COST;
7017 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7018 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7019 guint32 opsize = is_short ? 1 : 4;
7021 CHECK_OPSIZE (opsize);
7023 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7026 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7031 GET_BBLOCK (cfg, tblock, target);
7032 link_bblock (cfg, bblock, tblock);
7033 GET_BBLOCK (cfg, tblock, ip);
7034 link_bblock (cfg, bblock, tblock);
7036 if (sp != stack_start) {
7037 handle_stack_args (cfg, stack_start, sp - stack_start);
7038 CHECK_UNVERIFIABLE (cfg);
7041 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7042 cmp->sreg1 = sp [0]->dreg;
7043 type_from_op (cmp, sp [0], NULL);
7046 #if SIZEOF_REGISTER == 4
7047 if (cmp->opcode == OP_LCOMPARE_IMM) {
7048 /* Convert it to OP_LCOMPARE */
7049 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7050 ins->type = STACK_I8;
7051 ins->dreg = alloc_dreg (cfg, STACK_I8);
7053 MONO_ADD_INS (bblock, ins);
7054 cmp->opcode = OP_LCOMPARE;
7055 cmp->sreg2 = ins->dreg;
7058 MONO_ADD_INS (bblock, cmp);
7060 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7061 type_from_op (ins, sp [0], NULL);
7062 MONO_ADD_INS (bblock, ins);
7063 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7064 GET_BBLOCK (cfg, tblock, target);
7065 ins->inst_true_bb = tblock;
7066 GET_BBLOCK (cfg, tblock, ip);
7067 ins->inst_false_bb = tblock;
7068 start_new_bblock = 2;
7071 inline_costs += BRANCH_COST;
7086 MONO_INST_NEW (cfg, ins, *ip);
7088 target = ip + 4 + (gint32)read32(ip);
7094 inline_costs += BRANCH_COST;
7098 MonoBasicBlock **targets;
7099 MonoBasicBlock *default_bblock;
7100 MonoJumpInfoBBTable *table;
7101 int offset_reg = alloc_preg (cfg);
7102 int target_reg = alloc_preg (cfg);
7103 int table_reg = alloc_preg (cfg);
7104 int sum_reg = alloc_preg (cfg);
7105 gboolean use_op_switch;
7109 n = read32 (ip + 1);
7112 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7116 CHECK_OPSIZE (n * sizeof (guint32));
7117 target = ip + n * sizeof (guint32);
7119 GET_BBLOCK (cfg, default_bblock, target);
7120 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7122 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7123 for (i = 0; i < n; ++i) {
7124 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7125 targets [i] = tblock;
7126 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7130 if (sp != stack_start) {
7132 * Link the current bb with the targets as well, so handle_stack_args
7133 * will set their in_stack correctly.
7135 link_bblock (cfg, bblock, default_bblock);
7136 for (i = 0; i < n; ++i)
7137 link_bblock (cfg, bblock, targets [i]);
7139 handle_stack_args (cfg, stack_start, sp - stack_start);
7141 CHECK_UNVERIFIABLE (cfg);
7144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7148 for (i = 0; i < n; ++i)
7149 link_bblock (cfg, bblock, targets [i]);
7151 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7152 table->table = targets;
7153 table->table_size = n;
7155 use_op_switch = FALSE;
7157 /* ARM implements SWITCH statements differently */
7158 /* FIXME: Make it use the generic implementation */
7159 if (!cfg->compile_aot)
7160 use_op_switch = TRUE;
7163 if (COMPILE_LLVM (cfg))
7164 use_op_switch = TRUE;
7166 cfg->cbb->has_jump_table = 1;
7168 if (use_op_switch) {
7169 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7170 ins->sreg1 = src1->dreg;
7171 ins->inst_p0 = table;
7172 ins->inst_many_bb = targets;
7173 ins->klass = GUINT_TO_POINTER (n);
7174 MONO_ADD_INS (cfg->cbb, ins);
7176 if (sizeof (gpointer) == 8)
7177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7181 #if SIZEOF_REGISTER == 8
7182 /* The upper word might not be zero, and we add it to a 64 bit address later */
7183 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7186 if (cfg->compile_aot) {
7187 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7189 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7190 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7191 ins->inst_p0 = table;
7192 ins->dreg = table_reg;
7193 MONO_ADD_INS (cfg->cbb, ins);
7196 /* FIXME: Use load_memindex */
7197 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7199 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7201 start_new_bblock = 1;
7202 inline_costs += (BRANCH_COST * 2);
7222 dreg = alloc_freg (cfg);
7225 dreg = alloc_lreg (cfg);
7228 dreg = alloc_preg (cfg);
7231 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7232 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7233 ins->flags |= ins_flag;
7235 MONO_ADD_INS (bblock, ins);
7250 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7251 ins->flags |= ins_flag;
7253 MONO_ADD_INS (bblock, ins);
7255 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7256 emit_write_barrier (cfg, sp [0], sp [1], -1);
7265 MONO_INST_NEW (cfg, ins, (*ip));
7267 ins->sreg1 = sp [0]->dreg;
7268 ins->sreg2 = sp [1]->dreg;
7269 type_from_op (ins, sp [0], sp [1]);
7271 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7273 /* Use the immediate opcodes if possible */
7274 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7275 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7276 if (imm_opcode != -1) {
7277 ins->opcode = imm_opcode;
7278 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7281 sp [1]->opcode = OP_NOP;
7285 MONO_ADD_INS ((cfg)->cbb, (ins));
7287 *sp++ = mono_decompose_opcode (cfg, ins);
7304 MONO_INST_NEW (cfg, ins, (*ip));
7306 ins->sreg1 = sp [0]->dreg;
7307 ins->sreg2 = sp [1]->dreg;
7308 type_from_op (ins, sp [0], sp [1]);
7310 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7311 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7313 /* FIXME: Pass opcode to is_inst_imm */
7315 /* Use the immediate opcodes if possible */
7316 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7319 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7320 if (imm_opcode != -1) {
7321 ins->opcode = imm_opcode;
7322 if (sp [1]->opcode == OP_I8CONST) {
7323 #if SIZEOF_REGISTER == 8
7324 ins->inst_imm = sp [1]->inst_l;
7326 ins->inst_ls_word = sp [1]->inst_ls_word;
7327 ins->inst_ms_word = sp [1]->inst_ms_word;
7331 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7334 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7335 if (sp [1]->next == NULL)
7336 sp [1]->opcode = OP_NOP;
7339 MONO_ADD_INS ((cfg)->cbb, (ins));
7341 *sp++ = mono_decompose_opcode (cfg, ins);
7354 case CEE_CONV_OVF_I8:
7355 case CEE_CONV_OVF_U8:
7359 /* Special case this earlier so we have long constants in the IR */
7360 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7361 int data = sp [-1]->inst_c0;
7362 sp [-1]->opcode = OP_I8CONST;
7363 sp [-1]->type = STACK_I8;
7364 #if SIZEOF_REGISTER == 8
7365 if ((*ip) == CEE_CONV_U8)
7366 sp [-1]->inst_c0 = (guint32)data;
7368 sp [-1]->inst_c0 = data;
7370 sp [-1]->inst_ls_word = data;
7371 if ((*ip) == CEE_CONV_U8)
7372 sp [-1]->inst_ms_word = 0;
7374 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7376 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7383 case CEE_CONV_OVF_I4:
7384 case CEE_CONV_OVF_I1:
7385 case CEE_CONV_OVF_I2:
7386 case CEE_CONV_OVF_I:
7387 case CEE_CONV_OVF_U:
7390 if (sp [-1]->type == STACK_R8) {
7391 ADD_UNOP (CEE_CONV_OVF_I8);
7398 case CEE_CONV_OVF_U1:
7399 case CEE_CONV_OVF_U2:
7400 case CEE_CONV_OVF_U4:
7403 if (sp [-1]->type == STACK_R8) {
7404 ADD_UNOP (CEE_CONV_OVF_U8);
7411 case CEE_CONV_OVF_I1_UN:
7412 case CEE_CONV_OVF_I2_UN:
7413 case CEE_CONV_OVF_I4_UN:
7414 case CEE_CONV_OVF_I8_UN:
7415 case CEE_CONV_OVF_U1_UN:
7416 case CEE_CONV_OVF_U2_UN:
7417 case CEE_CONV_OVF_U4_UN:
7418 case CEE_CONV_OVF_U8_UN:
7419 case CEE_CONV_OVF_I_UN:
7420 case CEE_CONV_OVF_U_UN:
7427 CHECK_CFG_EXCEPTION;
7431 case CEE_ADD_OVF_UN:
7433 case CEE_MUL_OVF_UN:
7435 case CEE_SUB_OVF_UN:
7443 token = read32 (ip + 1);
7444 klass = mini_get_class (method, token, generic_context);
7445 CHECK_TYPELOAD (klass);
7447 if (generic_class_is_reference_type (cfg, klass)) {
7448 MonoInst *store, *load;
7449 int dreg = alloc_preg (cfg);
7451 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7452 load->flags |= ins_flag;
7453 MONO_ADD_INS (cfg->cbb, load);
7455 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7456 store->flags |= ins_flag;
7457 MONO_ADD_INS (cfg->cbb, store);
7459 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7460 emit_write_barrier (cfg, sp [0], sp [1], -1);
7462 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7474 token = read32 (ip + 1);
7475 klass = mini_get_class (method, token, generic_context);
7476 CHECK_TYPELOAD (klass);
7478 /* Optimize the common ldobj+stloc combination */
7488 loc_index = ip [5] - CEE_STLOC_0;
7495 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7496 CHECK_LOCAL (loc_index);
7498 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7499 ins->dreg = cfg->locals [loc_index]->dreg;
7505 /* Optimize the ldobj+stobj combination */
7506 /* The reference case ends up being a load+store anyway */
7507 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7512 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7519 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7528 CHECK_STACK_OVF (1);
7530 n = read32 (ip + 1);
7532 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7533 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7534 ins->type = STACK_OBJ;
7537 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7538 MonoInst *iargs [1];
7540 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7541 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7543 if (cfg->opt & MONO_OPT_SHARED) {
7544 MonoInst *iargs [3];
7546 if (cfg->compile_aot) {
7547 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7549 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7550 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7551 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7552 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7553 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7555 if (bblock->out_of_line) {
7556 MonoInst *iargs [2];
7558 if (image == mono_defaults.corlib) {
7560 * Avoid relocations in AOT and save some space by using a
7561 * version of helper_ldstr specialized to mscorlib.
7563 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7564 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7566 /* Avoid creating the string object */
7567 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7568 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7569 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7573 if (cfg->compile_aot) {
7574 NEW_LDSTRCONST (cfg, ins, image, n);
7576 MONO_ADD_INS (bblock, ins);
7579 NEW_PCONST (cfg, ins, NULL);
7580 ins->type = STACK_OBJ;
7581 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7583 MONO_ADD_INS (bblock, ins);
7592 MonoInst *iargs [2];
7593 MonoMethodSignature *fsig;
7596 MonoInst *vtable_arg = NULL;
7599 token = read32 (ip + 1);
7600 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7601 if (!cmethod || mono_loader_get_last_error ())
7603 fsig = mono_method_get_signature (cmethod, image, token);
7607 mono_save_token_info (cfg, image, token, cmethod);
7609 if (!mono_class_init (cmethod->klass))
7612 if (cfg->generic_sharing_context)
7613 context_used = mono_method_check_context_used (cmethod);
7615 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7616 if (check_linkdemand (cfg, method, cmethod))
7618 CHECK_CFG_EXCEPTION;
7619 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7620 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7623 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7624 emit_generic_class_init (cfg, cmethod->klass);
7625 CHECK_TYPELOAD (cmethod->klass);
7628 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7629 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7630 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7631 mono_class_vtable (cfg->domain, cmethod->klass);
7632 CHECK_TYPELOAD (cmethod->klass);
7634 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7635 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7638 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7639 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7641 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7643 CHECK_TYPELOAD (cmethod->klass);
7644 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7649 n = fsig->param_count;
7653 * Generate smaller code for the common newobj <exception> instruction in
7654 * argument checking code.
7656 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7657 is_exception_class (cmethod->klass) && n <= 2 &&
7658 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7659 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7660 MonoInst *iargs [3];
7662 g_assert (!vtable_arg);
7666 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7669 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7673 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7678 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7681 g_assert_not_reached ();
7689 /* move the args to allow room for 'this' in the first position */
7695 /* check_call_signature () requires sp[0] to be set */
7696 this_ins.type = STACK_OBJ;
7698 if (check_call_signature (cfg, fsig, sp))
7703 if (mini_class_is_system_array (cmethod->klass)) {
7704 g_assert (!vtable_arg);
7706 *sp = emit_get_rgctx_method (cfg, context_used,
7707 cmethod, MONO_RGCTX_INFO_METHOD);
7709 /* Avoid varargs in the common case */
7710 if (fsig->param_count == 1)
7711 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7712 else if (fsig->param_count == 2)
7713 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7714 else if (fsig->param_count == 3)
7715 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7717 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7718 } else if (cmethod->string_ctor) {
7719 g_assert (!context_used);
7720 g_assert (!vtable_arg);
7721 /* we simply pass a null pointer */
7722 EMIT_NEW_PCONST (cfg, *sp, NULL);
7723 /* now call the string ctor */
7724 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7726 MonoInst* callvirt_this_arg = NULL;
7728 if (cmethod->klass->valuetype) {
7729 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7730 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7731 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7736 * The code generated by mini_emit_virtual_call () expects
7737 * iargs [0] to be a boxed instance, but luckily the vcall
7738 * will be transformed into a normal call there.
7740 } else if (context_used) {
7741 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7744 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7746 CHECK_TYPELOAD (cmethod->klass);
7749 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7750 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7751 * As a workaround, we call class cctors before allocating objects.
7753 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7754 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7755 if (cfg->verbose_level > 2)
7756 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7757 class_inits = g_slist_prepend (class_inits, vtable);
7760 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7763 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7766 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7768 /* Now call the actual ctor */
7769 /* Avoid virtual calls to ctors if possible */
7770 if (cmethod->klass->marshalbyref)
7771 callvirt_this_arg = sp [0];
7774 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7775 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7776 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7781 CHECK_CFG_EXCEPTION;
7786 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7787 mono_method_check_inlining (cfg, cmethod) &&
7788 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7789 !g_list_find (dont_inline, cmethod)) {
7792 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7793 cfg->real_offset += 5;
7796 inline_costs += costs - 5;
7799 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7801 } else if (context_used &&
7802 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7803 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7804 MonoInst *cmethod_addr;
7806 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7807 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7809 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7812 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7813 callvirt_this_arg, NULL, vtable_arg);
7817 if (alloc == NULL) {
7819 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7820 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7834 token = read32 (ip + 1);
7835 klass = mini_get_class (method, token, generic_context);
7836 CHECK_TYPELOAD (klass);
7837 if (sp [0]->type != STACK_OBJ)
7840 if (cfg->generic_sharing_context)
7841 context_used = mono_class_check_context_used (klass);
7843 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7850 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7852 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7856 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7857 MonoMethod *mono_castclass;
7858 MonoInst *iargs [1];
7861 mono_castclass = mono_marshal_get_castclass (klass);
7864 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7865 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7866 g_assert (costs > 0);
7869 cfg->real_offset += 5;
7874 inline_costs += costs;
7877 ins = handle_castclass (cfg, klass, *sp, context_used);
7878 CHECK_CFG_EXCEPTION;
7888 token = read32 (ip + 1);
7889 klass = mini_get_class (method, token, generic_context);
7890 CHECK_TYPELOAD (klass);
7891 if (sp [0]->type != STACK_OBJ)
7894 if (cfg->generic_sharing_context)
7895 context_used = mono_class_check_context_used (klass);
7897 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7904 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7906 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7910 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7911 MonoMethod *mono_isinst;
7912 MonoInst *iargs [1];
7915 mono_isinst = mono_marshal_get_isinst (klass);
7918 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7919 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7920 g_assert (costs > 0);
7923 cfg->real_offset += 5;
7928 inline_costs += costs;
7931 ins = handle_isinst (cfg, klass, *sp, context_used);
7932 CHECK_CFG_EXCEPTION;
7939 case CEE_UNBOX_ANY: {
7943 token = read32 (ip + 1);
7944 klass = mini_get_class (method, token, generic_context);
7945 CHECK_TYPELOAD (klass);
7947 mono_save_token_info (cfg, image, token, klass);
7949 if (cfg->generic_sharing_context)
7950 context_used = mono_class_check_context_used (klass);
7952 if (generic_class_is_reference_type (cfg, klass)) {
7953 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7954 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7955 MonoMethod *mono_castclass;
7956 MonoInst *iargs [1];
7959 mono_castclass = mono_marshal_get_castclass (klass);
7962 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7963 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7965 g_assert (costs > 0);
7968 cfg->real_offset += 5;
7972 inline_costs += costs;
7974 ins = handle_castclass (cfg, klass, *sp, context_used);
7975 CHECK_CFG_EXCEPTION;
7983 if (mono_class_is_nullable (klass)) {
7984 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7991 ins = handle_unbox (cfg, klass, sp, context_used);
7997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8010 token = read32 (ip + 1);
8011 klass = mini_get_class (method, token, generic_context);
8012 CHECK_TYPELOAD (klass);
8014 mono_save_token_info (cfg, image, token, klass);
8016 if (cfg->generic_sharing_context)
8017 context_used = mono_class_check_context_used (klass);
8019 if (generic_class_is_reference_type (cfg, klass)) {
8025 if (klass == mono_defaults.void_class)
8027 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8029 /* frequent check in generic code: box (struct), brtrue */
8031 // FIXME: LLVM can't handle the inconsistent bb linking
8032 if (!mono_class_is_nullable (klass) &&
8033 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8034 (ip [5] == CEE_BRTRUE ||
8035 ip [5] == CEE_BRTRUE_S ||
8036 ip [5] == CEE_BRFALSE ||
8037 ip [5] == CEE_BRFALSE_S)) {
8038 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8040 MonoBasicBlock *true_bb, *false_bb;
8044 if (cfg->verbose_level > 3) {
8045 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8046 printf ("<box+brtrue opt>\n");
8054 target = ip + 1 + (signed char)(*ip);
8061 target = ip + 4 + (gint)(read32 (ip));
8065 g_assert_not_reached ();
8069 * We need to link both bblocks, since it is needed for handling stack
8070 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8071 * Branching to only one of them would lead to inconsistencies, so
8072 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8074 GET_BBLOCK (cfg, true_bb, target);
8075 GET_BBLOCK (cfg, false_bb, ip);
8077 mono_link_bblock (cfg, cfg->cbb, true_bb);
8078 mono_link_bblock (cfg, cfg->cbb, false_bb);
8080 if (sp != stack_start) {
8081 handle_stack_args (cfg, stack_start, sp - stack_start);
8083 CHECK_UNVERIFIABLE (cfg);
8086 if (COMPILE_LLVM (cfg)) {
8087 dreg = alloc_ireg (cfg);
8088 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8091 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8093 /* The JIT can't eliminate the iconst+compare */
8094 MONO_INST_NEW (cfg, ins, OP_BR);
8095 ins->inst_target_bb = is_true ? true_bb : false_bb;
8096 MONO_ADD_INS (cfg->cbb, ins);
8099 start_new_bblock = 1;
8103 *sp++ = handle_box (cfg, val, klass, context_used);
8105 CHECK_CFG_EXCEPTION;
8114 token = read32 (ip + 1);
8115 klass = mini_get_class (method, token, generic_context);
8116 CHECK_TYPELOAD (klass);
8118 mono_save_token_info (cfg, image, token, klass);
8120 if (cfg->generic_sharing_context)
8121 context_used = mono_class_check_context_used (klass);
8123 if (mono_class_is_nullable (klass)) {
8126 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8127 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8131 ins = handle_unbox (cfg, klass, sp, context_used);
8141 MonoClassField *field;
8145 if (*ip == CEE_STFLD) {
8152 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8154 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8157 token = read32 (ip + 1);
8158 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8159 field = mono_method_get_wrapper_data (method, token);
8160 klass = field->parent;
8163 field = mono_field_from_token (image, token, &klass, generic_context);
8167 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8168 FIELD_ACCESS_FAILURE;
8169 mono_class_init (klass);
8171 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8172 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8173 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8174 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8177 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8178 if (*ip == CEE_STFLD) {
8179 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8181 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8182 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8183 MonoInst *iargs [5];
8186 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8187 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8188 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8192 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8193 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8194 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8195 g_assert (costs > 0);
8197 cfg->real_offset += 5;
8200 inline_costs += costs;
8202 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8207 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8209 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8210 if (sp [0]->opcode != OP_LDADDR)
8211 store->flags |= MONO_INST_FAULT;
8213 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8214 /* insert call to write barrier */
8218 dreg = alloc_preg (cfg);
8219 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8220 emit_write_barrier (cfg, ptr, sp [1], -1);
8223 store->flags |= ins_flag;
8230 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8231 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8232 MonoInst *iargs [4];
8235 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8236 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8237 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8238 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8239 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8240 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8242 g_assert (costs > 0);
8244 cfg->real_offset += 5;
8248 inline_costs += costs;
8250 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8254 if (sp [0]->type == STACK_VTYPE) {
8257 /* Have to compute the address of the variable */
8259 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8261 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8263 g_assert (var->klass == klass);
8265 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8269 if (*ip == CEE_LDFLDA) {
8270 if (sp [0]->type == STACK_OBJ) {
8271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8272 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8275 dreg = alloc_preg (cfg);
8277 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8278 ins->klass = mono_class_from_mono_type (field->type);
8279 ins->type = STACK_MP;
8284 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8286 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8287 load->flags |= ins_flag;
8288 if (sp [0]->opcode != OP_LDADDR)
8289 load->flags |= MONO_INST_FAULT;
8300 MonoClassField *field;
8301 gpointer addr = NULL;
8302 gboolean is_special_static;
8305 token = read32 (ip + 1);
8307 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8308 field = mono_method_get_wrapper_data (method, token);
8309 klass = field->parent;
8312 field = mono_field_from_token (image, token, &klass, generic_context);
8315 mono_class_init (klass);
8316 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8317 FIELD_ACCESS_FAILURE;
8319 /* if the class is Critical then transparent code cannot access its fields */
8320 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8321 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8324 * We can only support shared generic static
8325 * field access on architectures where the
8326 * trampoline code has been extended to handle
8327 * the generic class init.
8329 #ifndef MONO_ARCH_VTABLE_REG
8330 GENERIC_SHARING_FAILURE (*ip);
8333 if (cfg->generic_sharing_context)
8334 context_used = mono_class_check_context_used (klass);
8336 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8338 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8339 * to be called here.
8341 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8342 mono_class_vtable (cfg->domain, klass);
8343 CHECK_TYPELOAD (klass);
8345 mono_domain_lock (cfg->domain);
8346 if (cfg->domain->special_static_fields)
8347 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8348 mono_domain_unlock (cfg->domain);
8350 is_special_static = mono_class_field_is_special_static (field);
8352 /* Generate IR to compute the field address */
8353 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8355 * Fast access to TLS data
8356 * Inline version of get_thread_static_data () in
8360 int idx, static_data_reg, array_reg, dreg;
8361 MonoInst *thread_ins;
8363 // offset &= 0x7fffffff;
8364 // idx = (offset >> 24) - 1;
8365 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8367 thread_ins = mono_get_thread_intrinsic (cfg);
8368 MONO_ADD_INS (cfg->cbb, thread_ins);
8369 static_data_reg = alloc_ireg (cfg);
8370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8372 if (cfg->compile_aot) {
8373 int offset_reg, offset2_reg, idx_reg;
8375 /* For TLS variables, this will return the TLS offset */
8376 EMIT_NEW_SFLDACONST (cfg, ins, field);
8377 offset_reg = ins->dreg;
8378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8379 idx_reg = alloc_ireg (cfg);
8380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8383 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8384 array_reg = alloc_ireg (cfg);
8385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8386 offset2_reg = alloc_ireg (cfg);
8387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8388 dreg = alloc_ireg (cfg);
8389 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8391 offset = (gsize)addr & 0x7fffffff;
8392 idx = (offset >> 24) - 1;
8394 array_reg = alloc_ireg (cfg);
8395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8396 dreg = alloc_ireg (cfg);
8397 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8399 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8400 (cfg->compile_aot && is_special_static) ||
8401 (context_used && is_special_static)) {
8402 MonoInst *iargs [2];
8404 g_assert (field->parent);
8405 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8407 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8408 field, MONO_RGCTX_INFO_CLASS_FIELD);
8410 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8412 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8413 } else if (context_used) {
8414 MonoInst *static_data;
8417 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8418 method->klass->name_space, method->klass->name, method->name,
8419 depth, field->offset);
8422 if (mono_class_needs_cctor_run (klass, method))
8423 emit_generic_class_init (cfg, klass);
8426 * The pointer we're computing here is
8428 * super_info.static_data + field->offset
8430 static_data = emit_get_rgctx_klass (cfg, context_used,
8431 klass, MONO_RGCTX_INFO_STATIC_DATA);
8433 if (field->offset == 0) {
8436 int addr_reg = mono_alloc_preg (cfg);
8437 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8439 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8440 MonoInst *iargs [2];
8442 g_assert (field->parent);
8443 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8444 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8445 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8447 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8449 CHECK_TYPELOAD (klass);
8451 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8452 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8453 if (cfg->verbose_level > 2)
8454 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8455 class_inits = g_slist_prepend (class_inits, vtable);
8457 if (cfg->run_cctors) {
8459 /* This makes it so that inlining cannot trigger */
8460 /* .cctors: too many apps depend on them */
8461 /* running with a specific order... */
8462 if (! vtable->initialized)
8464 ex = mono_runtime_class_init_full (vtable, FALSE);
8466 set_exception_object (cfg, ex);
8467 goto exception_exit;
8471 addr = (char*)vtable->data + field->offset;
8473 if (cfg->compile_aot)
8474 EMIT_NEW_SFLDACONST (cfg, ins, field);
8476 EMIT_NEW_PCONST (cfg, ins, addr);
8478 MonoInst *iargs [1];
8479 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8480 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8484 /* Generate IR to do the actual load/store operation */
8486 if (*ip == CEE_LDSFLDA) {
8487 ins->klass = mono_class_from_mono_type (field->type);
8488 ins->type = STACK_PTR;
8490 } else if (*ip == CEE_STSFLD) {
8495 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8496 store->flags |= ins_flag;
8498 gboolean is_const = FALSE;
8499 MonoVTable *vtable = NULL;
8501 if (!context_used) {
8502 vtable = mono_class_vtable (cfg->domain, klass);
8503 CHECK_TYPELOAD (klass);
8505 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8506 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8507 gpointer addr = (char*)vtable->data + field->offset;
8508 int ro_type = field->type->type;
8509 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8510 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8512 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8515 case MONO_TYPE_BOOLEAN:
8517 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8521 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8524 case MONO_TYPE_CHAR:
8526 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8530 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8535 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8539 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8545 case MONO_TYPE_FNPTR:
8546 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8547 type_to_eval_stack_type ((cfg), field->type, *sp);
8550 case MONO_TYPE_STRING:
8551 case MONO_TYPE_OBJECT:
8552 case MONO_TYPE_CLASS:
8553 case MONO_TYPE_SZARRAY:
8554 case MONO_TYPE_ARRAY:
8555 if (!mono_gc_is_moving ()) {
8556 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8557 type_to_eval_stack_type ((cfg), field->type, *sp);
8565 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8570 case MONO_TYPE_VALUETYPE:
8580 CHECK_STACK_OVF (1);
8582 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8583 load->flags |= ins_flag;
8596 token = read32 (ip + 1);
8597 klass = mini_get_class (method, token, generic_context);
8598 CHECK_TYPELOAD (klass);
8599 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8600 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8601 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8602 generic_class_is_reference_type (cfg, klass)) {
8603 /* insert call to write barrier */
8604 emit_write_barrier (cfg, sp [0], sp [1], -1);
8616 const char *data_ptr;
8618 guint32 field_token;
8624 token = read32 (ip + 1);
8626 klass = mini_get_class (method, token, generic_context);
8627 CHECK_TYPELOAD (klass);
8629 if (cfg->generic_sharing_context)
8630 context_used = mono_class_check_context_used (klass);
8632 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8633 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8634 ins->sreg1 = sp [0]->dreg;
8635 ins->type = STACK_I4;
8636 ins->dreg = alloc_ireg (cfg);
8637 MONO_ADD_INS (cfg->cbb, ins);
8638 *sp = mono_decompose_opcode (cfg, ins);
8643 MonoClass *array_class = mono_array_class_get (klass, 1);
8644 /* FIXME: we cannot get a managed
8645 allocator because we can't get the
8646 open generic class's vtable. We
8647 have the same problem in
8648 handle_alloc(). This
8649 needs to be solved so that we can
8650 have managed allocs of shared
8653 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8654 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8656 MonoMethod *managed_alloc = NULL;
8658 /* FIXME: Decompose later to help abcrem */
8661 args [0] = emit_get_rgctx_klass (cfg, context_used,
8662 array_class, MONO_RGCTX_INFO_VTABLE);
8667 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8669 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8671 if (cfg->opt & MONO_OPT_SHARED) {
8672 /* Decompose now to avoid problems with references to the domainvar */
8673 MonoInst *iargs [3];
8675 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8676 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8679 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8681 /* Decompose later since it is needed by abcrem */
8682 MonoClass *array_type = mono_array_class_get (klass, 1);
8683 mono_class_vtable (cfg->domain, array_type);
8684 CHECK_TYPELOAD (array_type);
8686 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8687 ins->dreg = alloc_preg (cfg);
8688 ins->sreg1 = sp [0]->dreg;
8689 ins->inst_newa_class = klass;
8690 ins->type = STACK_OBJ;
8692 MONO_ADD_INS (cfg->cbb, ins);
8693 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8694 cfg->cbb->has_array_access = TRUE;
8696 /* Needed so mono_emit_load_get_addr () gets called */
8697 mono_get_got_var (cfg);
8707 * we inline/optimize the initialization sequence if possible.
8708 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8709 * for small sizes open code the memcpy
8710 * ensure the rva field is big enough
8712 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8713 MonoMethod *memcpy_method = get_memcpy_method ();
8714 MonoInst *iargs [3];
8715 int add_reg = alloc_preg (cfg);
8717 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8718 if (cfg->compile_aot) {
8719 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8721 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8723 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8724 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8733 if (sp [0]->type != STACK_OBJ)
8736 dreg = alloc_preg (cfg);
8737 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8738 ins->dreg = alloc_preg (cfg);
8739 ins->sreg1 = sp [0]->dreg;
8740 ins->type = STACK_I4;
8741 /* This flag will be inherited by the decomposition */
8742 ins->flags |= MONO_INST_FAULT;
8743 MONO_ADD_INS (cfg->cbb, ins);
8744 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8745 cfg->cbb->has_array_access = TRUE;
8753 if (sp [0]->type != STACK_OBJ)
8756 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8758 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8759 CHECK_TYPELOAD (klass);
8760 /* we need to make sure that this array is exactly the type it needs
8761 * to be for correctness. the wrappers are lax with their usage
8762 * so we need to ignore them here
8764 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8765 MonoClass *array_class = mono_array_class_get (klass, 1);
8766 mini_emit_check_array_type (cfg, sp [0], array_class);
8767 CHECK_TYPELOAD (array_class);
8771 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8786 case CEE_LDELEM_REF: {
8792 if (*ip == CEE_LDELEM) {
8794 token = read32 (ip + 1);
8795 klass = mini_get_class (method, token, generic_context);
8796 CHECK_TYPELOAD (klass);
8797 mono_class_init (klass);
8800 klass = array_access_to_klass (*ip);
8802 if (sp [0]->type != STACK_OBJ)
8805 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8807 if (sp [1]->opcode == OP_ICONST) {
8808 int array_reg = sp [0]->dreg;
8809 int index_reg = sp [1]->dreg;
8810 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8812 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8813 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8815 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8816 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8819 if (*ip == CEE_LDELEM)
8832 case CEE_STELEM_REF:
8839 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8841 if (*ip == CEE_STELEM) {
8843 token = read32 (ip + 1);
8844 klass = mini_get_class (method, token, generic_context);
8845 CHECK_TYPELOAD (klass);
8846 mono_class_init (klass);
8849 klass = array_access_to_klass (*ip);
8851 if (sp [0]->type != STACK_OBJ)
8854 /* storing a NULL doesn't need any of the complex checks in stelemref */
8855 if (generic_class_is_reference_type (cfg, klass) &&
8856 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8857 MonoMethod* helper = mono_marshal_get_stelemref ();
8858 MonoInst *iargs [3];
8860 if (sp [0]->type != STACK_OBJ)
8862 if (sp [2]->type != STACK_OBJ)
8869 mono_emit_method_call (cfg, helper, iargs, NULL);
8871 if (sp [1]->opcode == OP_ICONST) {
8872 int array_reg = sp [0]->dreg;
8873 int index_reg = sp [1]->dreg;
8874 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8876 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8877 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8879 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8880 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8884 if (*ip == CEE_STELEM)
8891 case CEE_CKFINITE: {
8895 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8896 ins->sreg1 = sp [0]->dreg;
8897 ins->dreg = alloc_freg (cfg);
8898 ins->type = STACK_R8;
8899 MONO_ADD_INS (bblock, ins);
8901 *sp++ = mono_decompose_opcode (cfg, ins);
8906 case CEE_REFANYVAL: {
8907 MonoInst *src_var, *src;
8909 int klass_reg = alloc_preg (cfg);
8910 int dreg = alloc_preg (cfg);
8913 MONO_INST_NEW (cfg, ins, *ip);
8916 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8917 CHECK_TYPELOAD (klass);
8918 mono_class_init (klass);
8920 if (cfg->generic_sharing_context)
8921 context_used = mono_class_check_context_used (klass);
8924 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8926 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8927 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8928 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8931 MonoInst *klass_ins;
8933 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8934 klass, MONO_RGCTX_INFO_KLASS);
8937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8938 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8940 mini_emit_class_check (cfg, klass_reg, klass);
8942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8943 ins->type = STACK_MP;
8948 case CEE_MKREFANY: {
8949 MonoInst *loc, *addr;
8952 MONO_INST_NEW (cfg, ins, *ip);
8955 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8956 CHECK_TYPELOAD (klass);
8957 mono_class_init (klass);
8959 if (cfg->generic_sharing_context)
8960 context_used = mono_class_check_context_used (klass);
8962 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8963 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8966 MonoInst *const_ins;
8967 int type_reg = alloc_preg (cfg);
8969 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8972 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8973 } else if (cfg->compile_aot) {
8974 int const_reg = alloc_preg (cfg);
8975 int type_reg = alloc_preg (cfg);
8977 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8982 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8983 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8987 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8988 ins->type = STACK_VTYPE;
8989 ins->klass = mono_defaults.typed_reference_class;
8996 MonoClass *handle_class;
8998 CHECK_STACK_OVF (1);
9001 n = read32 (ip + 1);
9003 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9004 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9005 handle = mono_method_get_wrapper_data (method, n);
9006 handle_class = mono_method_get_wrapper_data (method, n + 1);
9007 if (handle_class == mono_defaults.typehandle_class)
9008 handle = &((MonoClass*)handle)->byval_arg;
9011 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9015 mono_class_init (handle_class);
9016 if (cfg->generic_sharing_context) {
9017 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9018 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9019 /* This case handles ldtoken
9020 of an open type, like for
9023 } else if (handle_class == mono_defaults.typehandle_class) {
9024 /* If we get a MONO_TYPE_CLASS
9025 then we need to provide the
9027 instantiation of it. */
9028 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9031 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9032 } else if (handle_class == mono_defaults.fieldhandle_class)
9033 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9034 else if (handle_class == mono_defaults.methodhandle_class)
9035 context_used = mono_method_check_context_used (handle);
9037 g_assert_not_reached ();
9040 if ((cfg->opt & MONO_OPT_SHARED) &&
9041 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9042 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9043 MonoInst *addr, *vtvar, *iargs [3];
9044 int method_context_used;
9046 if (cfg->generic_sharing_context)
9047 method_context_used = mono_method_check_context_used (method);
9049 method_context_used = 0;
9051 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9053 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9054 EMIT_NEW_ICONST (cfg, iargs [1], n);
9055 if (method_context_used) {
9056 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9057 method, MONO_RGCTX_INFO_METHOD);
9058 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9060 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9061 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9063 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9065 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9067 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9069 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9070 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9071 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9072 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9073 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9074 MonoClass *tclass = mono_class_from_mono_type (handle);
9076 mono_class_init (tclass);
9078 ins = emit_get_rgctx_klass (cfg, context_used,
9079 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9080 } else if (cfg->compile_aot) {
9081 if (method->wrapper_type) {
9082 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9083 /* Special case for static synchronized wrappers */
9084 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9086 /* FIXME: n is not a normal token */
9087 cfg->disable_aot = TRUE;
9088 EMIT_NEW_PCONST (cfg, ins, NULL);
9091 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9094 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9096 ins->type = STACK_OBJ;
9097 ins->klass = cmethod->klass;
9100 MonoInst *addr, *vtvar;
9102 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9105 if (handle_class == mono_defaults.typehandle_class) {
9106 ins = emit_get_rgctx_klass (cfg, context_used,
9107 mono_class_from_mono_type (handle),
9108 MONO_RGCTX_INFO_TYPE);
9109 } else if (handle_class == mono_defaults.methodhandle_class) {
9110 ins = emit_get_rgctx_method (cfg, context_used,
9111 handle, MONO_RGCTX_INFO_METHOD);
9112 } else if (handle_class == mono_defaults.fieldhandle_class) {
9113 ins = emit_get_rgctx_field (cfg, context_used,
9114 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9116 g_assert_not_reached ();
9118 } else if (cfg->compile_aot) {
9119 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9121 EMIT_NEW_PCONST (cfg, ins, handle);
9123 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9124 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9125 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9135 MONO_INST_NEW (cfg, ins, OP_THROW);
9137 ins->sreg1 = sp [0]->dreg;
9139 bblock->out_of_line = TRUE;
9140 MONO_ADD_INS (bblock, ins);
9141 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9142 MONO_ADD_INS (bblock, ins);
9145 link_bblock (cfg, bblock, end_bblock);
9146 start_new_bblock = 1;
9148 case CEE_ENDFINALLY:
9149 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9150 MONO_ADD_INS (bblock, ins);
9152 start_new_bblock = 1;
9155 * Control will leave the method so empty the stack, otherwise
9156 * the next basic block will start with a nonempty stack.
9158 while (sp != stack_start) {
9166 if (*ip == CEE_LEAVE) {
9168 target = ip + 5 + (gint32)read32(ip + 1);
9171 target = ip + 2 + (signed char)(ip [1]);
9174 /* empty the stack */
9175 while (sp != stack_start) {
9180 * If this leave statement is in a catch block, check for a
9181 * pending exception, and rethrow it if necessary.
9182 * We avoid doing this in runtime invoke wrappers, since those are called
9183 * by native code which expects the wrapper to catch all exceptions.
9185 for (i = 0; i < header->num_clauses; ++i) {
9186 MonoExceptionClause *clause = &header->clauses [i];
9189 * Use <= in the final comparison to handle clauses with multiple
9190 * leave statements, like in bug #78024.
9191 * The ordering of the exception clauses guarantees that we find the
9194 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9196 MonoBasicBlock *dont_throw;
9201 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9204 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9206 NEW_BBLOCK (cfg, dont_throw);
9209 * Currently, we always rethrow the abort exception, despite the
9210 * fact that this is not correct. See thread6.cs for an example.
9211 * But propagating the abort exception is more important than
9212 * getting the semantics right.
9214 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9215 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9216 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9218 MONO_START_BB (cfg, dont_throw);
9223 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9225 MonoExceptionClause *clause;
9227 for (tmp = handlers; tmp; tmp = tmp->next) {
9229 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9231 link_bblock (cfg, bblock, tblock);
9232 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9233 ins->inst_target_bb = tblock;
9234 ins->inst_eh_block = clause;
9235 MONO_ADD_INS (bblock, ins);
9236 bblock->has_call_handler = 1;
9237 if (COMPILE_LLVM (cfg)) {
9238 MonoBasicBlock *target_bb;
9241 * Link the finally bblock with the target, since it will
9242 * conceptually branch there.
9243 * FIXME: Have to link the bblock containing the endfinally.
9245 GET_BBLOCK (cfg, target_bb, target);
9246 link_bblock (cfg, tblock, target_bb);
9249 g_list_free (handlers);
9252 MONO_INST_NEW (cfg, ins, OP_BR);
9253 MONO_ADD_INS (bblock, ins);
9254 GET_BBLOCK (cfg, tblock, target);
9255 link_bblock (cfg, bblock, tblock);
9256 ins->inst_target_bb = tblock;
9257 start_new_bblock = 1;
9259 if (*ip == CEE_LEAVE)
9268 * Mono specific opcodes
9270 case MONO_CUSTOM_PREFIX: {
9272 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9276 case CEE_MONO_ICALL: {
9278 MonoJitICallInfo *info;
9280 token = read32 (ip + 2);
9281 func = mono_method_get_wrapper_data (method, token);
9282 info = mono_find_jit_icall_by_addr (func);
9285 CHECK_STACK (info->sig->param_count);
9286 sp -= info->sig->param_count;
9288 ins = mono_emit_jit_icall (cfg, info->func, sp);
9289 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9293 inline_costs += 10 * num_calls++;
9297 case CEE_MONO_LDPTR: {
9300 CHECK_STACK_OVF (1);
9302 token = read32 (ip + 2);
9304 ptr = mono_method_get_wrapper_data (method, token);
9305 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9306 MonoJitICallInfo *callinfo;
9307 const char *icall_name;
9309 icall_name = method->name + strlen ("__icall_wrapper_");
9310 g_assert (icall_name);
9311 callinfo = mono_find_jit_icall_by_name (icall_name);
9312 g_assert (callinfo);
9314 if (ptr == callinfo->func) {
9315 /* Will be transformed into an AOTCONST later */
9316 EMIT_NEW_PCONST (cfg, ins, ptr);
9322 /* FIXME: Generalize this */
9323 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9324 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9329 EMIT_NEW_PCONST (cfg, ins, ptr);
9332 inline_costs += 10 * num_calls++;
9333 /* Can't embed random pointers into AOT code */
9334 cfg->disable_aot = 1;
9337 case CEE_MONO_ICALL_ADDR: {
9338 MonoMethod *cmethod;
9341 CHECK_STACK_OVF (1);
9343 token = read32 (ip + 2);
9345 cmethod = mono_method_get_wrapper_data (method, token);
9347 if (cfg->compile_aot) {
9348 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9350 ptr = mono_lookup_internal_call (cmethod);
9352 EMIT_NEW_PCONST (cfg, ins, ptr);
9358 case CEE_MONO_VTADDR: {
9359 MonoInst *src_var, *src;
9365 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9366 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9371 case CEE_MONO_NEWOBJ: {
9372 MonoInst *iargs [2];
9374 CHECK_STACK_OVF (1);
9376 token = read32 (ip + 2);
9377 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9378 mono_class_init (klass);
9379 NEW_DOMAINCONST (cfg, iargs [0]);
9380 MONO_ADD_INS (cfg->cbb, iargs [0]);
9381 NEW_CLASSCONST (cfg, iargs [1], klass);
9382 MONO_ADD_INS (cfg->cbb, iargs [1]);
9383 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9385 inline_costs += 10 * num_calls++;
9388 case CEE_MONO_OBJADDR:
9391 MONO_INST_NEW (cfg, ins, OP_MOVE);
9392 ins->dreg = alloc_preg (cfg);
9393 ins->sreg1 = sp [0]->dreg;
9394 ins->type = STACK_MP;
9395 MONO_ADD_INS (cfg->cbb, ins);
9399 case CEE_MONO_LDNATIVEOBJ:
9401 * Similar to LDOBJ, but instead load the unmanaged
9402 * representation of the vtype to the stack.
9407 token = read32 (ip + 2);
9408 klass = mono_method_get_wrapper_data (method, token);
9409 g_assert (klass->valuetype);
9410 mono_class_init (klass);
9413 MonoInst *src, *dest, *temp;
9416 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9417 temp->backend.is_pinvoke = 1;
9418 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9419 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9421 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9422 dest->type = STACK_VTYPE;
9423 dest->klass = klass;
9429 case CEE_MONO_RETOBJ: {
9431 * Same as RET, but return the native representation of a vtype
9434 g_assert (cfg->ret);
9435 g_assert (mono_method_signature (method)->pinvoke);
9440 token = read32 (ip + 2);
9441 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9443 if (!cfg->vret_addr) {
9444 g_assert (cfg->ret_var_is_local);
9446 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9448 EMIT_NEW_RETLOADA (cfg, ins);
9450 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9452 if (sp != stack_start)
9455 MONO_INST_NEW (cfg, ins, OP_BR);
9456 ins->inst_target_bb = end_bblock;
9457 MONO_ADD_INS (bblock, ins);
9458 link_bblock (cfg, bblock, end_bblock);
9459 start_new_bblock = 1;
9463 case CEE_MONO_CISINST:
9464 case CEE_MONO_CCASTCLASS: {
9469 token = read32 (ip + 2);
9470 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9471 if (ip [1] == CEE_MONO_CISINST)
9472 ins = handle_cisinst (cfg, klass, sp [0]);
9474 ins = handle_ccastclass (cfg, klass, sp [0]);
9480 case CEE_MONO_SAVE_LMF:
9481 case CEE_MONO_RESTORE_LMF:
9482 #ifdef MONO_ARCH_HAVE_LMF_OPS
9483 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9484 MONO_ADD_INS (bblock, ins);
9485 cfg->need_lmf_area = TRUE;
9489 case CEE_MONO_CLASSCONST:
9490 CHECK_STACK_OVF (1);
9492 token = read32 (ip + 2);
9493 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9496 inline_costs += 10 * num_calls++;
9498 case CEE_MONO_NOT_TAKEN:
9499 bblock->out_of_line = TRUE;
9503 CHECK_STACK_OVF (1);
9505 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9506 ins->dreg = alloc_preg (cfg);
9507 ins->inst_offset = (gint32)read32 (ip + 2);
9508 ins->type = STACK_PTR;
9509 MONO_ADD_INS (bblock, ins);
9513 case CEE_MONO_DYN_CALL: {
9516 /* It would be easier to call a trampoline, but that would put an
9517 * extra frame on the stack, confusing exception handling. So
9518 * implement it inline using an opcode for now.
9521 if (!cfg->dyn_call_var) {
9522 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9523 /* prevent it from being register allocated */
9524 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9527 /* Has to use a call inst since it local regalloc expects it */
9528 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9529 ins = (MonoInst*)call;
9531 ins->sreg1 = sp [0]->dreg;
9532 ins->sreg2 = sp [1]->dreg;
9533 MONO_ADD_INS (bblock, ins);
9535 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9536 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9540 inline_costs += 10 * num_calls++;
9545 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9555 /* somewhat similar to LDTOKEN */
9556 MonoInst *addr, *vtvar;
9557 CHECK_STACK_OVF (1);
9558 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9560 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9561 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9563 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9564 ins->type = STACK_VTYPE;
9565 ins->klass = mono_defaults.argumenthandle_class;
9578 * The following transforms:
9579 * CEE_CEQ into OP_CEQ
9580 * CEE_CGT into OP_CGT
9581 * CEE_CGT_UN into OP_CGT_UN
9582 * CEE_CLT into OP_CLT
9583 * CEE_CLT_UN into OP_CLT_UN
9585 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9587 MONO_INST_NEW (cfg, ins, cmp->opcode);
9589 cmp->sreg1 = sp [0]->dreg;
9590 cmp->sreg2 = sp [1]->dreg;
9591 type_from_op (cmp, sp [0], sp [1]);
9593 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9594 cmp->opcode = OP_LCOMPARE;
9595 else if (sp [0]->type == STACK_R8)
9596 cmp->opcode = OP_FCOMPARE;
9598 cmp->opcode = OP_ICOMPARE;
9599 MONO_ADD_INS (bblock, cmp);
9600 ins->type = STACK_I4;
9601 ins->dreg = alloc_dreg (cfg, ins->type);
9602 type_from_op (ins, sp [0], sp [1]);
9604 if (cmp->opcode == OP_FCOMPARE) {
9606 * The backends expect the fceq opcodes to do the
9609 cmp->opcode = OP_NOP;
9610 ins->sreg1 = cmp->sreg1;
9611 ins->sreg2 = cmp->sreg2;
9613 MONO_ADD_INS (bblock, ins);
9620 MonoMethod *cil_method;
9621 gboolean needs_static_rgctx_invoke;
9623 CHECK_STACK_OVF (1);
9625 n = read32 (ip + 2);
9626 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9627 if (!cmethod || mono_loader_get_last_error ())
9629 mono_class_init (cmethod->klass);
9631 mono_save_token_info (cfg, image, n, cmethod);
9633 if (cfg->generic_sharing_context)
9634 context_used = mono_method_check_context_used (cmethod);
9636 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9638 cil_method = cmethod;
9639 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9640 METHOD_ACCESS_FAILURE;
9642 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9643 if (check_linkdemand (cfg, method, cmethod))
9645 CHECK_CFG_EXCEPTION;
9646 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9647 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9651 * Optimize the common case of ldftn+delegate creation
9653 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9654 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9655 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9656 MonoInst *target_ins;
9658 int invoke_context_used = 0;
9660 invoke = mono_get_delegate_invoke (ctor_method->klass);
9661 if (!invoke || !mono_method_signature (invoke))
9664 if (cfg->generic_sharing_context)
9665 invoke_context_used = mono_method_check_context_used (invoke);
9667 target_ins = sp [-1];
9669 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9670 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9671 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9673 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9677 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9678 /* FIXME: SGEN support */
9679 if (invoke_context_used == 0) {
9681 if (cfg->verbose_level > 3)
9682 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9684 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9685 CHECK_CFG_EXCEPTION;
9694 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9695 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9699 inline_costs += 10 * num_calls++;
9702 case CEE_LDVIRTFTN: {
9707 n = read32 (ip + 2);
9708 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9709 if (!cmethod || mono_loader_get_last_error ())
9711 mono_class_init (cmethod->klass);
9713 if (cfg->generic_sharing_context)
9714 context_used = mono_method_check_context_used (cmethod);
9716 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9717 if (check_linkdemand (cfg, method, cmethod))
9719 CHECK_CFG_EXCEPTION;
9720 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9721 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9727 args [1] = emit_get_rgctx_method (cfg, context_used,
9728 cmethod, MONO_RGCTX_INFO_METHOD);
9731 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9733 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9736 inline_costs += 10 * num_calls++;
9740 CHECK_STACK_OVF (1);
9742 n = read16 (ip + 2);
9744 EMIT_NEW_ARGLOAD (cfg, ins, n);
9749 CHECK_STACK_OVF (1);
9751 n = read16 (ip + 2);
9753 NEW_ARGLOADA (cfg, ins, n);
9754 MONO_ADD_INS (cfg->cbb, ins);
9762 n = read16 (ip + 2);
9764 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9766 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9770 CHECK_STACK_OVF (1);
9772 n = read16 (ip + 2);
9774 EMIT_NEW_LOCLOAD (cfg, ins, n);
9779 unsigned char *tmp_ip;
9780 CHECK_STACK_OVF (1);
9782 n = read16 (ip + 2);
9785 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9791 EMIT_NEW_LOCLOADA (cfg, ins, n);
9800 n = read16 (ip + 2);
9802 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9804 emit_stloc_ir (cfg, sp, header, n);
9811 if (sp != stack_start)
9813 if (cfg->method != method)
9815 * Inlining this into a loop in a parent could lead to
9816 * stack overflows which is different behavior than the
9817 * non-inlined case, thus disable inlining in this case.
9819 goto inline_failure;
9821 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9822 ins->dreg = alloc_preg (cfg);
9823 ins->sreg1 = sp [0]->dreg;
9824 ins->type = STACK_PTR;
9825 MONO_ADD_INS (cfg->cbb, ins);
9827 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9829 ins->flags |= MONO_INST_INIT;
9834 case CEE_ENDFILTER: {
9835 MonoExceptionClause *clause, *nearest;
9836 int cc, nearest_num;
9840 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9842 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9843 ins->sreg1 = (*sp)->dreg;
9844 MONO_ADD_INS (bblock, ins);
9845 start_new_bblock = 1;
9850 for (cc = 0; cc < header->num_clauses; ++cc) {
9851 clause = &header->clauses [cc];
9852 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9853 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9854 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9860 if ((ip - header->code) != nearest->handler_offset)
9865 case CEE_UNALIGNED_:
9866 ins_flag |= MONO_INST_UNALIGNED;
9867 /* FIXME: record alignment? we can assume 1 for now */
9872 ins_flag |= MONO_INST_VOLATILE;
9876 ins_flag |= MONO_INST_TAILCALL;
9877 cfg->flags |= MONO_CFG_HAS_TAIL;
9878 /* Can't inline tail calls at this time */
9879 inline_costs += 100000;
9886 token = read32 (ip + 2);
9887 klass = mini_get_class (method, token, generic_context);
9888 CHECK_TYPELOAD (klass);
9889 if (generic_class_is_reference_type (cfg, klass))
9890 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9892 mini_emit_initobj (cfg, *sp, NULL, klass);
9896 case CEE_CONSTRAINED_:
9898 token = read32 (ip + 2);
9899 if (method->wrapper_type != MONO_WRAPPER_NONE)
9900 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9902 constrained_call = mono_class_get_full (image, token, generic_context);
9903 CHECK_TYPELOAD (constrained_call);
9908 MonoInst *iargs [3];
9912 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9913 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9914 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9915 /* emit_memset only works when val == 0 */
9916 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9921 if (ip [1] == CEE_CPBLK) {
9922 MonoMethod *memcpy_method = get_memcpy_method ();
9923 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9925 MonoMethod *memset_method = get_memset_method ();
9926 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9936 ins_flag |= MONO_INST_NOTYPECHECK;
9938 ins_flag |= MONO_INST_NORANGECHECK;
9939 /* we ignore the no-nullcheck for now since we
9940 * really do it explicitly only when doing callvirt->call
9946 int handler_offset = -1;
9948 for (i = 0; i < header->num_clauses; ++i) {
9949 MonoExceptionClause *clause = &header->clauses [i];
9950 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9951 handler_offset = clause->handler_offset;
9956 bblock->flags |= BB_EXCEPTION_UNSAFE;
9958 g_assert (handler_offset != -1);
9960 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9961 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9962 ins->sreg1 = load->dreg;
9963 MONO_ADD_INS (bblock, ins);
9965 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9966 MONO_ADD_INS (bblock, ins);
9969 link_bblock (cfg, bblock, end_bblock);
9970 start_new_bblock = 1;
9978 CHECK_STACK_OVF (1);
9980 token = read32 (ip + 2);
9981 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9982 MonoType *type = mono_type_create_from_typespec (image, token);
9983 token = mono_type_size (type, &ialign);
9985 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9986 CHECK_TYPELOAD (klass);
9987 mono_class_init (klass);
9988 token = mono_class_value_size (klass, &align);
9990 EMIT_NEW_ICONST (cfg, ins, token);
9995 case CEE_REFANYTYPE: {
9996 MonoInst *src_var, *src;
10002 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10004 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10005 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10006 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10011 case CEE_READONLY_:
10024 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10034 g_warning ("opcode 0x%02x not handled", *ip);
10038 if (start_new_bblock != 1)
10041 bblock->cil_length = ip - bblock->cil_code;
10042 bblock->next_bb = end_bblock;
10044 if (cfg->method == method && cfg->domainvar) {
10046 MonoInst *get_domain;
10048 cfg->cbb = init_localsbb;
10050 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10051 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10054 get_domain->dreg = alloc_preg (cfg);
10055 MONO_ADD_INS (cfg->cbb, get_domain);
10057 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10058 MONO_ADD_INS (cfg->cbb, store);
10061 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10062 if (cfg->compile_aot)
10063 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10064 mono_get_got_var (cfg);
10067 if (cfg->method == method && cfg->got_var)
10068 mono_emit_load_got_addr (cfg);
10073 cfg->cbb = init_localsbb;
10075 for (i = 0; i < header->num_locals; ++i) {
10076 MonoType *ptype = header->locals [i];
10077 int t = ptype->type;
10078 dreg = cfg->locals [i]->dreg;
10080 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10081 t = mono_class_enum_basetype (ptype->data.klass)->type;
10082 if (ptype->byref) {
10083 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10084 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10085 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10086 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10087 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10088 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10089 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10090 ins->type = STACK_R8;
10091 ins->inst_p0 = (void*)&r8_0;
10092 ins->dreg = alloc_dreg (cfg, STACK_R8);
10093 MONO_ADD_INS (init_localsbb, ins);
10094 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10095 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10096 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10097 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10099 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10104 if (cfg->init_ref_vars && cfg->method == method) {
10105 /* Emit initialization for ref vars */
10106 // FIXME: Avoid duplication initialization for IL locals.
10107 for (i = 0; i < cfg->num_varinfo; ++i) {
10108 MonoInst *ins = cfg->varinfo [i];
10110 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10111 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10115 /* Add a sequence point for method entry/exit events */
10117 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10118 MONO_ADD_INS (init_localsbb, ins);
10119 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10120 MONO_ADD_INS (cfg->bb_exit, ins);
10125 if (cfg->method == method) {
10126 MonoBasicBlock *bb;
10127 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10128 bb->region = mono_find_block_region (cfg, bb->real_offset);
10130 mono_create_spvar_for_region (cfg, bb->region);
10131 if (cfg->verbose_level > 2)
10132 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10136 g_slist_free (class_inits);
10137 dont_inline = g_list_remove (dont_inline, method);
10139 if (inline_costs < 0) {
10142 /* Method is too large */
10143 mname = mono_method_full_name (method, TRUE);
10144 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10145 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10147 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10148 mono_basic_block_free (original_bb);
10152 if ((cfg->verbose_level > 2) && (cfg->method == method))
10153 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10155 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10156 mono_basic_block_free (original_bb);
10157 return inline_costs;
10160 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10167 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10171 set_exception_type_from_invalid_il (cfg, method, ip);
10175 g_slist_free (class_inits);
10176 mono_basic_block_free (original_bb);
10177 dont_inline = g_list_remove (dont_inline, method);
10178 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10183 store_membase_reg_to_store_membase_imm (int opcode)
10186 case OP_STORE_MEMBASE_REG:
10187 return OP_STORE_MEMBASE_IMM;
10188 case OP_STOREI1_MEMBASE_REG:
10189 return OP_STOREI1_MEMBASE_IMM;
10190 case OP_STOREI2_MEMBASE_REG:
10191 return OP_STOREI2_MEMBASE_IMM;
10192 case OP_STOREI4_MEMBASE_REG:
10193 return OP_STOREI4_MEMBASE_IMM;
10194 case OP_STOREI8_MEMBASE_REG:
10195 return OP_STOREI8_MEMBASE_IMM;
10197 g_assert_not_reached ();
10203 #endif /* DISABLE_JIT */
10206 mono_op_to_op_imm (int opcode)
10210 return OP_IADD_IMM;
10212 return OP_ISUB_IMM;
10214 return OP_IDIV_IMM;
10216 return OP_IDIV_UN_IMM;
10218 return OP_IREM_IMM;
10220 return OP_IREM_UN_IMM;
10222 return OP_IMUL_IMM;
10224 return OP_IAND_IMM;
10228 return OP_IXOR_IMM;
10230 return OP_ISHL_IMM;
10232 return OP_ISHR_IMM;
10234 return OP_ISHR_UN_IMM;
10237 return OP_LADD_IMM;
10239 return OP_LSUB_IMM;
10241 return OP_LAND_IMM;
10245 return OP_LXOR_IMM;
10247 return OP_LSHL_IMM;
10249 return OP_LSHR_IMM;
10251 return OP_LSHR_UN_IMM;
10254 return OP_COMPARE_IMM;
10256 return OP_ICOMPARE_IMM;
10258 return OP_LCOMPARE_IMM;
10260 case OP_STORE_MEMBASE_REG:
10261 return OP_STORE_MEMBASE_IMM;
10262 case OP_STOREI1_MEMBASE_REG:
10263 return OP_STOREI1_MEMBASE_IMM;
10264 case OP_STOREI2_MEMBASE_REG:
10265 return OP_STOREI2_MEMBASE_IMM;
10266 case OP_STOREI4_MEMBASE_REG:
10267 return OP_STOREI4_MEMBASE_IMM;
10269 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10271 return OP_X86_PUSH_IMM;
10272 case OP_X86_COMPARE_MEMBASE_REG:
10273 return OP_X86_COMPARE_MEMBASE_IMM;
10275 #if defined(TARGET_AMD64)
10276 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10277 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10279 case OP_VOIDCALL_REG:
10280 return OP_VOIDCALL;
10288 return OP_LOCALLOC_IMM;
10295 ldind_to_load_membase (int opcode)
10299 return OP_LOADI1_MEMBASE;
10301 return OP_LOADU1_MEMBASE;
10303 return OP_LOADI2_MEMBASE;
10305 return OP_LOADU2_MEMBASE;
10307 return OP_LOADI4_MEMBASE;
10309 return OP_LOADU4_MEMBASE;
10311 return OP_LOAD_MEMBASE;
10312 case CEE_LDIND_REF:
10313 return OP_LOAD_MEMBASE;
10315 return OP_LOADI8_MEMBASE;
10317 return OP_LOADR4_MEMBASE;
10319 return OP_LOADR8_MEMBASE;
10321 g_assert_not_reached ();
10328 stind_to_store_membase (int opcode)
10332 return OP_STOREI1_MEMBASE_REG;
10334 return OP_STOREI2_MEMBASE_REG;
10336 return OP_STOREI4_MEMBASE_REG;
10338 case CEE_STIND_REF:
10339 return OP_STORE_MEMBASE_REG;
10341 return OP_STOREI8_MEMBASE_REG;
10343 return OP_STORER4_MEMBASE_REG;
10345 return OP_STORER8_MEMBASE_REG;
10347 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM), or return -1 when the target
 * architecture has no such form.  Only x86/amd64 provide *_MEM loads;
 * on every other target this always returns -1.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Map an ALU opcode to an x86/amd64 read-modify-write form whose
 * destination is a memory location, given STORE_OPCODE, the store which
 * consumes the ALU result.  The *_REG returns fuse a register source,
 * the *_IMM returns fuse an immediate source.  Presumably returns -1
 * when no fused form applies (the fallthrough/return lines are not
 * visible in this chunk).
 * NOTE(review): interleaved lines (case labels, braces, #endif) are
 * missing from this chunk; only what the visible lines establish is
 * documented below.
 */
10380 op_to_op_dest_membase (int store_opcode, int opcode)
10382 #if defined(TARGET_X86)
/* x86: only pointer-sized / 32-bit destination stores can be fused. */
10383 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* 32-bit ALU + register source -> op [membase], reg */
10388 return OP_X86_ADD_MEMBASE_REG;
10390 return OP_X86_SUB_MEMBASE_REG;
10392 return OP_X86_AND_MEMBASE_REG;
10394 return OP_X86_OR_MEMBASE_REG;
10396 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit ALU + immediate source -> op [membase], imm */
10399 return OP_X86_ADD_MEMBASE_IMM;
10402 return OP_X86_SUB_MEMBASE_IMM;
10405 return OP_X86_AND_MEMBASE_IMM;
10408 return OP_X86_OR_MEMBASE_IMM;
10411 return OP_X86_XOR_MEMBASE_IMM;
10417 #if defined(TARGET_AMD64)
/* amd64: 64-bit destination stores can be fused as well. */
10418 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86_* opcodes on amd64. */
10423 return OP_X86_ADD_MEMBASE_REG;
10425 return OP_X86_SUB_MEMBASE_REG;
10427 return OP_X86_AND_MEMBASE_REG;
10429 return OP_X86_OR_MEMBASE_REG;
10431 return OP_X86_XOR_MEMBASE_REG;
10433 return OP_X86_ADD_MEMBASE_IMM;
10435 return OP_X86_SUB_MEMBASE_IMM;
10437 return OP_X86_AND_MEMBASE_IMM;
10439 return OP_X86_OR_MEMBASE_IMM;
10441 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ops get the AMD64_* opcodes. */
10443 return OP_AMD64_ADD_MEMBASE_REG;
10445 return OP_AMD64_SUB_MEMBASE_REG;
10447 return OP_AMD64_AND_MEMBASE_REG;
10449 return OP_AMD64_OR_MEMBASE_REG;
10451 return OP_AMD64_XOR_MEMBASE_REG;
10454 return OP_AMD64_ADD_MEMBASE_IMM;
10457 return OP_AMD64_SUB_MEMBASE_IMM;
10460 return OP_AMD64_AND_MEMBASE_IMM;
10463 return OP_AMD64_OR_MEMBASE_IMM;
10466 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Map a compare-result opcode to an x86/amd64 setcc form which writes
 * its boolean result directly to memory, given STORE_OPCODE, the store
 * which would otherwise consume it.  Presumably returns -1 when no
 * fused form applies (the return line is not visible in this chunk).
 * NOTE(review): the case labels selecting the SETEQ vs. SETNE branch
 * are missing from this chunk — verify against the full source.
 */
10476 op_to_op_store_membase (int store_opcode, int opcode)
10478 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* setcc writes exactly one byte, so only a byte store can be fused. */
10481 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10482 return OP_X86_SETEQ_MEMBASE;
10484 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10485 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Map OPCODE to an x86/amd64 form which reads its first source
 * operand directly from memory, given LOAD_OPCODE, the load which
 * produces sreg1.  Presumably returns -1 when no fused form applies
 * (the fallthrough lines are not visible in this chunk).
 * NOTE(review): interleaved lines (case labels, #endif, braces) are
 * missing from this chunk; only visible lines are documented.
 */
10493 op_to_op_src1_membase (int load_opcode, int opcode)
10496 /* FIXME: This has sign extension issues */
/* Special case: byte-wide compare-with-immediate against a u1 load. */
10498 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10499 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/32-bit loads can be folded into the instruction. */
10502 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10507 return OP_X86_PUSH_MEMBASE;
10508 case OP_COMPARE_IMM:
10509 case OP_ICOMPARE_IMM:
10510 return OP_X86_COMPARE_MEMBASE_IMM;
10513 return OP_X86_COMPARE_MEMBASE_REG;
10517 #ifdef TARGET_AMD64
10518 /* FIXME: This has sign extension issues */
10520 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10521 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: push takes a pointer/64-bit memory operand. */
10526 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10527 return OP_X86_PUSH_MEMBASE;
/* The block below appears to be commented out in the original. */
10529 /* FIXME: This only works for 32 bit immediates
10530 case OP_COMPARE_IMM:
10531 case OP_LCOMPARE_IMM:
10532 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10533 return OP_AMD64_COMPARE_MEMBASE_IMM;
10535 case OP_ICOMPARE_IMM:
10536 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10537 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 64- vs 32-bit form from the load width. */
10541 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10542 return OP_AMD64_COMPARE_MEMBASE_REG;
10545 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10546 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Map OPCODE to an x86/amd64 form which reads its second source
 * operand directly from memory, given LOAD_OPCODE, the load which
 * produces sreg2.  Presumably returns -1 when no fused form applies
 * (the fallthrough lines are not visible in this chunk).
 * NOTE(review): interleaved lines (case labels, #endif, braces) are
 * missing from this chunk; only visible lines are documented.
 */
10555 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/32-bit loads can be folded as the second operand. */
10558 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10564 return OP_X86_COMPARE_REG_MEMBASE;
10566 return OP_X86_ADD_REG_MEMBASE;
10568 return OP_X86_SUB_REG_MEMBASE;
10570 return OP_X86_AND_REG_MEMBASE;
10572 return OP_X86_OR_REG_MEMBASE;
10574 return OP_X86_XOR_REG_MEMBASE;
10578 #ifdef TARGET_AMD64
/* amd64: 32-bit loads pair with 32-bit (X86_*/ICOMPARE) forms... */
10579 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10582 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10584 return OP_X86_ADD_REG_MEMBASE;
10586 return OP_X86_SUB_REG_MEMBASE;
10588 return OP_X86_AND_REG_MEMBASE;
10590 return OP_X86_OR_REG_MEMBASE;
10592 return OP_X86_XOR_REG_MEMBASE;
/* ...while pointer/64-bit loads pair with the AMD64_* forms. */
10594 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10598 return OP_AMD64_COMPARE_REG_MEMBASE;
10600 return OP_AMD64_ADD_REG_MEMBASE;
10602 return OP_AMD64_SUB_REG_MEMBASE;
10604 return OP_AMD64_AND_REG_MEMBASE;
10606 return OP_AMD64_OR_REG_MEMBASE;
10608 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes
 * which the target architecture emulates in software: long shifts on
 * 32-bit targets without native long shift ops, and mul/div/rem when
 * MONO_ARCH_EMULATE_* is defined.  The emulation helpers expect
 * register operands, so the immediate forms must not be produced.
 * NOTE(review): the case labels under each #if (presumably the opcodes
 * being excluded) are missing from this chunk — verify against the
 * full source.
 */
10617 mono_op_to_op_imm_noemul (int opcode)
10620 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10626 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Default: fall back to the plain immediate mapping. */
10634 return mono_op_to_op_imm (opcode);
10638 #ifndef DISABLE_JIT
/*
 * NOTE(review): this chunk is missing many interleaved lines (braces,
 * case labels, declarations such as `vreg', `prev_bb', `i', `pos').
 * Comments below only describe what the visible lines establish.
 *
 * Pass overview (from the visible code): scan every instruction in
 * every bblock, record per-vreg which bblock last used it in
 * vreg_to_bb, and promote any vreg seen in two different bblocks to a
 * real variable (made "global") so the register allocator handles it
 * correctly; then convert variables used in only one bblock back into
 * plain local vregs, and finally compact the varinfo/vars tables.
 */
10641 * mono_handle_global_vregs:
10643 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10647 mono_handle_global_vregs (MonoCompile *cfg)
10649 gint32 *vreg_to_bb;
10650 MonoBasicBlock *bb;
/*
 * NOTE(review): `sizeof (gint32*)' should presumably be `sizeof (gint32)'
 * (the array holds gint32 values, not pointers), and the `+ 1' is added
 * to the byte count rather than to the element count.  Harmless
 * over-allocation on 64-bit, but worth fixing — verify.
 */
10653 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10655 #ifdef MONO_ARCH_SIMD_INTRINSICS
10656 if (cfg->uses_simd_intrinsics)
10657 mono_simd_simplify_indirection (cfg);
10660 /* Find local vregs used in more than one bb */
10661 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10662 MonoInst *ins = bb->code;
10663 int block_num = bb->block_num;
10665 if (cfg->verbose_level > 2)
10666 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10669 for (; ins; ins = ins->next) {
10670 const char *spec = INS_INFO (ins->opcode);
10671 int regtype = 0, regindex;
10674 if (G_UNLIKELY (cfg->verbose_level > 2))
10675 mono_print_ins (ins);
/* This pass runs after lowering: no CIL opcodes may remain. */
10677 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dreg, 1 = sreg1, 2 = sreg2, 3 = sreg3; a ' ' spec
 * entry means the instruction has no register in that slot. */
10679 for (regindex = 0; regindex < 4; regindex ++) {
10682 if (regindex == 0) {
10683 regtype = spec [MONO_INST_DEST];
10684 if (regtype == ' ')
10687 } else if (regindex == 1) {
10688 regtype = spec [MONO_INST_SRC1];
10689 if (regtype == ' ')
10692 } else if (regindex == 2) {
10693 regtype = spec [MONO_INST_SRC2];
10694 if (regtype == ' ')
10697 } else if (regindex == 3) {
10698 regtype = spec [MONO_INST_SRC3];
10699 if (regtype == ' ')
10704 #if SIZEOF_REGISTER == 4
10705 /* In the LLVM case, the long opcodes are not decomposed */
10706 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10708 * Since some instructions reference the original long vreg,
10709 * and some reference the two component vregs, it is quite hard
10710 * to determine when it needs to be global. So be conservative.
10712 if (!get_vreg_to_inst (cfg, vreg)) {
10713 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10715 if (cfg->verbose_level > 2)
10716 printf ("LONG VREG R%d made global.\n", vreg);
10720 * Make the component vregs volatile since the optimizations can
10721 * get confused otherwise.
/* On 32-bit targets a long vreg occupies vreg+1 (ls) and vreg+2 (ms). */
10723 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10724 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10728 g_assert (vreg != -1);
/* vreg_to_bb encoding: 0 = unseen, n+1 = seen only in block n,
 * -1 = seen in more than one block. */
10730 prev_bb = vreg_to_bb [vreg];
10731 if (prev_bb == 0) {
10732 /* 0 is a valid block num */
10733 vreg_to_bb [vreg] = block_num + 1;
10734 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never promoted to variables. */
10735 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10738 if (!get_vreg_to_inst (cfg, vreg)) {
10739 if (G_UNLIKELY (cfg->verbose_level > 2))
10740 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the regtype
 * (int / int64 / double / ins->klass for vtypes, presumably —
 * the case labels are not visible here). */
10744 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10747 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10750 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10753 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10756 g_assert_not_reached ();
10760 /* Flag as having been used in more than one bb */
10761 vreg_to_bb [vreg] = -1;
10767 /* If a variable is used in only one bblock, convert it into a local vreg */
10768 for (i = 0; i < cfg->num_varinfo; i++) {
10769 MonoInst *var = cfg->varinfo [i];
10770 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10772 switch (var->type) {
10778 #if SIZEOF_REGISTER == 8
10781 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10782 /* Enabling this screws up the fp stack on x86 */
10785 /* Arguments are implicitly global */
10786 /* Putting R4 vars into registers doesn't work currently */
10787 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10789 * Make that the variable's liveness interval doesn't contain a call, since
10790 * that would cause the lvreg to be spilled, making the whole optimization
10793 /* This is too slow for JIT compilation */
10795 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10797 int def_index, call_index, ins_index;
10798 gboolean spilled = FALSE;
10803 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10804 const char *spec = INS_INFO (ins->opcode);
10806 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10807 def_index = ins_index;
/*
 * NOTE(review): the second clause below repeats the SRC1/sreg1 test —
 * it almost certainly should check MONO_INST_SRC2/ins->sreg2, otherwise
 * uses of the variable as a second source are never detected here.
 * Verify against upstream before fixing.
 */
10809 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10810 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10811 if (call_index > def_index) {
10817 if (MONO_IS_CALL (ins))
10818 call_index = ins_index;
10828 if (G_UNLIKELY (cfg->verbose_level > 2))
10829 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Kill the variable; the vreg stays usable as a plain local vreg. */
10830 var->flags |= MONO_INST_IS_DEAD;
10831 cfg->vreg_to_inst [var->dreg] = NULL;
10838 * Compress the varinfo and vars tables so the liveness computation is faster and
10839 * takes up less space.
10842 for (i = 0; i < cfg->num_varinfo; ++i) {
10843 MonoInst *var = cfg->varinfo [i];
10844 if (pos < i && cfg->locals_start == i)
10845 cfg->locals_start = pos;
/* Keep only live entries, sliding them down to index `pos'. */
10846 if (!(var->flags & MONO_INST_IS_DEAD)) {
10848 cfg->varinfo [pos] = cfg->varinfo [i];
10849 cfg->varinfo [pos]->inst_c0 = pos;
10850 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10851 cfg->vars [pos].idx = pos;
10852 #if SIZEOF_REGISTER == 4
10853 if (cfg->varinfo [pos]->type == STACK_I8) {
10854 /* Modify the two component vars too */
10857 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10858 var1->inst_c0 = pos;
10859 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10860 var1->inst_c0 = pos;
10867 cfg->num_varinfo = pos;
10868 if (cfg->locals_start > cfg->num_varinfo)
10869 cfg->locals_start = cfg->num_varinfo;
10873 * mono_spill_global_vars:
10875 * Generate spill code for variables which are not allocated to registers,
10876 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10877 * code is generated which could be optimized by the local optimization passes.
10880 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10882 MonoBasicBlock *bb;
10884 int orig_next_vreg;
10885 guint32 *vreg_to_lvreg;
10887 guint32 i, lvregs_len;
10888 gboolean dest_has_lvreg = FALSE;
10889 guint32 stacktypes [128];
10890 MonoInst **live_range_start, **live_range_end;
10891 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10893 *need_local_opts = FALSE;
10895 memset (spec2, 0, sizeof (spec2));
10897 /* FIXME: Move this function to mini.c */
/* Map opcode-spec register-type characters ('i', 'l', 'f', ...) to stack
 * types, used below when allocating fresh dregs via alloc_dreg (). */
10898 stacktypes ['i'] = STACK_PTR;
10899 stacktypes ['l'] = STACK_I8;
10900 stacktypes ['f'] = STACK_R8;
10901 #ifdef MONO_ARCH_SIMD_INTRINSICS
10902 stacktypes ['x'] = STACK_VTYPE;
10905 #if SIZEOF_REGISTER == 4
10906 /* Create MonoInsts for longs */
10907 for (i = 0; i < cfg->num_varinfo; i++) {
10908 MonoInst *ins = cfg->varinfo [i];
10910 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10911 switch (ins->type) {
10916 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10919 g_assert (ins->opcode == OP_REGOFFSET);
/* The two word halves of a 64-bit var live in the component vregs
 * dreg + 1 and dreg + 2; give each an OP_REGOFFSET at the proper
 * word offset within the parent's stack slot. */
10921 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10923 tree->opcode = OP_REGOFFSET;
10924 tree->inst_basereg = ins->inst_basereg;
10925 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10927 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10929 tree->opcode = OP_REGOFFSET;
10930 tree->inst_basereg = ins->inst_basereg;
10931 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10941 /* FIXME: widening and truncation */
10944 * As an optimization, when a variable allocated to the stack is first loaded into
10945 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10946 * the variable again.
/* vreg_to_lvreg maps a stack variable's vreg to the lvreg currently
 * holding its value (0 = none); lvregs/lvregs_len track which entries
 * are set so they can be cleared cheaply. NOTE(review): lvregs and
 * spec2 are declared outside this excerpt. */
10948 orig_next_vreg = cfg->next_vreg;
10949 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10950 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10954 * These arrays contain the first and last instructions accessing a given
10956 * Since we emit bblocks in the same order we process them here, and we
10957 * don't split live ranges, these will precisely describe the live range of
10958 * the variable, i.e. the instruction range where a valid value can be found
10959 * in the variable's location.
10960 * The live range is computed using the liveness info computed by the liveness pass.
10961 * We can't use vmv->range, since that is an abstract live range, and we need
10962 * one which is instruction precise.
10963 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10965 /* FIXME: Only do this if debugging info is requested */
10966 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10967 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10968 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10969 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10971 /* Add spill loads/stores */
10972 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10975 if (cfg->verbose_level > 2)
10976 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10978 /* Clear vreg_to_lvreg array */
/* The lvreg cache is per-bblock: it is reset here at the start of
 * every block, so cached values never flow across block boundaries. */
10979 for (i = 0; i < lvregs_len; i++)
10980 vreg_to_lvreg [lvregs [i]] = 0;
10984 MONO_BB_FOR_EACH_INS (bb, ins) {
10985 const char *spec = INS_INFO (ins->opcode);
10986 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10987 gboolean store, no_lvreg;
10988 int sregs [MONO_MAX_SRC_REGS];
10990 if (G_UNLIKELY (cfg->verbose_level > 2))
10991 mono_print_ins (ins);
10993 if (ins->opcode == OP_NOP)
10997 * We handle LDADDR here as well, since it can only be decomposed
10998 * when variable addresses are known.
11000 if (ins->opcode == OP_LDADDR) {
11001 MonoInst *var = ins->inst_p0;
11003 if (var->opcode == OP_VTARG_ADDR) {
11004 /* Happens on SPARC/S390 where vtypes are passed by reference */
11005 MonoInst *vtaddr = var->inst_left;
11006 if (vtaddr->opcode == OP_REGVAR) {
11007 ins->opcode = OP_MOVE;
11008 ins->sreg1 = vtaddr->dreg;
11010 else if (var->inst_left->opcode == OP_REGOFFSET) {
11011 ins->opcode = OP_LOAD_MEMBASE;
11012 ins->inst_basereg = vtaddr->inst_basereg;
11013 ins->inst_offset = vtaddr->inst_offset;
11017 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack variable decomposes to basereg + offset. */
11019 ins->opcode = OP_ADD_IMM;
11020 ins->sreg1 = var->inst_basereg;
11021 ins->inst_imm = var->inst_offset;
11024 *need_local_opts = TRUE;
11025 spec = INS_INFO (ins->opcode);
/* All CIL-level (CEE_*) opcodes must have been lowered by this
 * point; hitting one here is a bug. */
11028 if (ins->opcode < MONO_CEE_LAST) {
11029 mono_print_ins (ins);
11030 g_assert_not_reached ();
11034 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg and sreg2 for store opcodes so the dreg
 * handling below operates on the value register, and build a
 * matching spec in spec2; the swap is undone further down. */
11038 if (MONO_IS_STORE_MEMBASE (ins)) {
11039 tmp_reg = ins->dreg;
11040 ins->dreg = ins->sreg2;
11041 ins->sreg2 = tmp_reg;
11044 spec2 [MONO_INST_DEST] = ' ';
11045 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11046 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11047 spec2 [MONO_INST_SRC3] = ' ';
11049 } else if (MONO_IS_STORE_MEMINDEX (ins))
11050 g_assert_not_reached ();
11055 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11056 printf ("\t %.3s %d", spec, ins->dreg);
11057 num_sregs = mono_inst_get_src_registers (ins, sregs);
11058 for (srcindex = 0; srcindex < 3; ++srcindex)
11059 printf (" %d", sregs [srcindex]);
/* ---- Destination register ---- */
11066 regtype = spec [MONO_INST_DEST];
11067 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* If the dreg is a global variable, either rename it to the
 * allocated hreg, fuse the store into the instruction, or emit an
 * explicit spill store after it. */
11070 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11071 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11072 MonoInst *store_ins;
11074 MonoInst *def_ins = ins;
11075 int dreg = ins->dreg; /* The original vreg */
11077 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11079 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just rename. */
11080 ins->dreg = var->dreg;
11081 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11083 * Instead of emitting a load+store, use a _membase opcode.
11085 g_assert (var->opcode == OP_REGOFFSET);
11086 if (ins->opcode == OP_MOVE) {
11090 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11091 ins->inst_basereg = var->inst_basereg;
11092 ins->inst_offset = var->inst_offset;
11095 spec = INS_INFO (ins->opcode);
11099 g_assert (var->opcode == OP_REGOFFSET);
11101 prev_dreg = ins->dreg;
11103 /* Invalidate any previous lvreg for this vreg */
11104 vreg_to_lvreg [ins->dreg] = 0;
/* Under soft float, R8 values are stored as I8 bit patterns. */
11108 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11110 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg; the spill store below
 * copies it into the variable's stack slot. */
11113 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11115 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: store the two word halves
 * (dreg + 1 = LS word, dreg + 2 = MS word) separately. */
11116 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11117 mono_bblock_insert_after_ins (bb, ins, store_ins);
11118 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11119 mono_bblock_insert_after_ins (bb, ins, store_ins);
11120 def_ins = store_ins;
11123 g_assert (store_opcode != OP_STOREV_MEMBASE);
11125 /* Try to fuse the store into the instruction itself */
11126 /* FIXME: Add more instructions */
11127 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant definition: turn it into a store-immediate. */
11128 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11129 ins->inst_imm = ins->inst_c0;
11130 ins->inst_destbasereg = var->inst_basereg;
11131 ins->inst_offset = var->inst_offset;
11132 spec = INS_INFO (ins->opcode);
11133 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Plain move into the variable: turn it into the store itself. */
11134 ins->opcode = store_opcode;
11135 ins->inst_destbasereg = var->inst_basereg;
11136 ins->inst_offset = var->inst_offset;
11140 tmp_reg = ins->dreg;
11141 ins->dreg = ins->sreg2;
11142 ins->sreg2 = tmp_reg;
11145 spec2 [MONO_INST_DEST] = ' ';
11146 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11147 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11148 spec2 [MONO_INST_SRC3] = ' ';
11150 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11151 // FIXME: The backends expect the base reg to be in inst_basereg
11152 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11154 ins->inst_basereg = var->inst_basereg;
11155 ins->inst_offset = var->inst_offset;
11156 spec = INS_INFO (ins->opcode);
11158 /* printf ("INS: "); mono_print_ins (ins); */
11159 /* Create a store instruction */
11160 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11162 /* Insert it after the instruction */
11163 mono_bblock_insert_after_ins (bb, ins, store_ins);
11165 def_ins = store_ins;
11168 * We can't assign ins->dreg to var->dreg here, since the
11169 * sregs could use it. So set a flag, and do it after
/* Only cache an lvreg for non-volatile, non-indirect vars, and
 * (on fp-stack archs) not for fp values. */
11172 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11173 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for live-range info. */
11178 if (def_ins && !live_range_start [dreg]) {
11179 live_range_start [dreg] = def_ins;
11180 live_range_start_bb [dreg] = bb;
/* ---- Source registers ---- */
/* For each source that is a global variable: rename to its hreg,
 * reuse a cached lvreg, fuse the load into the instruction, or
 * emit an explicit load before it. */
11187 num_sregs = mono_inst_get_src_registers (ins, sregs);
11188 for (srcindex = 0; srcindex < 3; ++srcindex) {
11189 regtype = spec [MONO_INST_SRC1 + srcindex];
11190 sreg = sregs [srcindex];
11192 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11193 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11194 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11195 MonoInst *use_ins = ins;
11196 MonoInst *load_ins;
11197 guint32 load_opcode;
11199 if (var->opcode == OP_REGVAR) {
11200 sregs [srcindex] = var->dreg;
11201 //mono_inst_set_src_registers (ins, sregs);
11202 live_range_end [sreg] = use_ins;
11203 live_range_end_bb [sreg] = bb;
11207 g_assert (var->opcode == OP_REGOFFSET);
11209 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11211 g_assert (load_opcode != OP_LOADV_MEMBASE);
11213 if (vreg_to_lvreg [sreg]) {
11214 g_assert (vreg_to_lvreg [sreg] != -1);
11216 /* The variable is already loaded to an lvreg */
11217 if (G_UNLIKELY (cfg->verbose_level > 2))
11218 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11219 sregs [srcindex] = vreg_to_lvreg [sreg];
11220 //mono_inst_set_src_registers (ins, sregs);
11224 /* Try to fuse the load into the instruction */
11225 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11226 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11227 sregs [0] = var->inst_basereg;
11228 //mono_inst_set_src_registers (ins, sregs);
11229 ins->inst_offset = var->inst_offset;
11230 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11231 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11232 sregs [1] = var->inst_basereg;
11233 //mono_inst_set_src_registers (ins, sregs);
11234 ins->inst_offset = var->inst_offset;
11236 if (MONO_IS_REAL_MOVE (ins)) {
11237 ins->opcode = OP_NOP;
11240 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg. */
11242 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11244 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11245 if (var->dreg == prev_dreg) {
11247 * sreg refers to the value loaded by the load
11248 * emitted below, but we need to use ins->dreg
11249 * since it refers to the store emitted earlier.
/* Remember the lvreg for later uses of this variable. */
11253 g_assert (sreg != -1);
11254 vreg_to_lvreg [var->dreg] = sreg;
11255 g_assert (lvregs_len < 1024);
11256 lvregs [lvregs_len ++] = var->dreg;
11260 sregs [srcindex] = sreg;
11261 //mono_inst_set_src_registers (ins, sregs);
11263 if (regtype == 'l') {
/* 64-bit load on a 32-bit target: load MS then LS word halves. */
11264 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11265 mono_bblock_insert_before_ins (bb, ins, load_ins);
11266 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11267 mono_bblock_insert_before_ins (bb, ins, load_ins);
11268 use_ins = load_ins;
11271 #if SIZEOF_REGISTER == 4
11272 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11274 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11275 mono_bblock_insert_before_ins (bb, ins, load_ins);
11276 use_ins = load_ins;
/* Extend the live range to this use (only for original vregs,
 * not the lvregs allocated during this pass). */
11280 if (var->dreg < orig_next_vreg) {
11281 live_range_end [var->dreg] = use_ins;
11282 live_range_end_bb [var->dreg] = bb;
11286 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs are
 * processed it is safe to cache ins->dreg as the variable's lvreg. */
11288 if (dest_has_lvreg) {
11289 g_assert (ins->dreg != -1);
11290 vreg_to_lvreg [prev_dreg] = ins->dreg;
11291 g_assert (lvregs_len < 1024);
11292 lvregs [lvregs_len ++] = prev_dreg;
11293 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes —
 * presumably guarded by the same 'store' condition; the guard
 * line is outside this excerpt (TODO confirm). */
11297 tmp_reg = ins->dreg;
11298 ins->dreg = ins->sreg2;
11299 ins->sreg2 = tmp_reg;
/* Cached lvregs are invalidated across calls. */
11302 if (MONO_IS_CALL (ins)) {
11303 /* Clear vreg_to_lvreg array */
11304 for (i = 0; i < lvregs_len; i++)
11305 vreg_to_lvreg [lvregs [i]] = 0;
11307 } else if (ins->opcode == OP_NOP) {
11309 MONO_INST_NULLIFY_SREGS (ins);
11312 if (cfg->verbose_level > 2)
11313 mono_print_ins_index (1, ins);
11316 /* Extend the live range based on the liveness info */
11317 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11318 for (i = 0; i < cfg->num_varinfo; i ++) {
11319 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11321 if (vreg_is_volatile (cfg, vi->vreg))
11322 /* The liveness info is incomplete */
11325 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11326 /* Live from at least the first ins of this bb */
11327 live_range_start [vi->vreg] = bb->code;
11328 live_range_start_bb [vi->vreg] = bb;
11331 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11332 /* Live at least until the last ins of this bb */
11333 live_range_end [vi->vreg] = bb->last_ins;
11334 live_range_end_bb [vi->vreg] = bb;
11340 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11342 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11343 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11345 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11346 for (i = 0; i < cfg->num_varinfo; ++i) {
11347 int vreg = MONO_VARINFO (cfg, i)->vreg;
11350 if (live_range_start [vreg]) {
11351 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11353 ins->inst_c1 = vreg;
11354 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11356 if (live_range_end [vreg]) {
11357 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11359 ins->inst_c1 = vreg;
11360 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11361 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11363 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays were g_new()-ed above; release them here. */
11369 g_free (live_range_start);
11370 g_free (live_range_end);
11371 g_free (live_range_start_bb);
11372 g_free (live_range_end_bb);
11377 * - use 'iadd' instead of 'int_add'
11378 * - handling ovf opcodes: decompose in method_to_ir.
11379 * - unify iregs/fregs
11380 * -> partly done, the missing parts are:
11381 * - a more complete unification would involve unifying the hregs as well, so
11382 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11383 * would no longer map to the machine hregs, so the code generators would need to
11384 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11385 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11386 * fp/non-fp branches speeds it up by about 15%.
11387 * - use sext/zext opcodes instead of shifts
11389 * - get rid of TEMPLOADs if possible and use vregs instead
11390 * - clean up usage of OP_P/OP_ opcodes
11391 * - cleanup usage of DUMMY_USE
11392 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11394 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11395 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11396 * - make sure handle_stack_args () is called before the branch is emitted
11397 * - when the new IR is done, get rid of all unused stuff
11398 * - COMPARE/BEQ as separate instructions or unify them ?
11399 * - keeping them separate allows specialized compare instructions like
11400 * compare_imm, compare_membase
11401 * - most back ends unify fp compare+branch, fp compare+ceq
11402 * - integrate mono_save_args into inline_method
11403 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11404 * - handle long shift opts on 32 bit platforms somehow: they require
11405 * 3 sregs (2 for arg1 and 1 for arg2)
11406 * - make byref a 'normal' type.
11407 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11408 * variable if needed.
11409 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11410 * like inline_method.
11411 * - remove inlining restrictions
11412 * - fix LNEG and enable cfold of INEG
11413 * - generalize x86 optimizations like ldelema as a peephole optimization
11414 * - add store_mem_imm for amd64
11415 * - optimize the loading of the interruption flag in the managed->native wrappers
11416 * - avoid special handling of OP_NOP in passes
11417 * - move code inserting instructions into one function/macro.
11418 * - try a coalescing phase after liveness analysis
11419 * - add float -> vreg conversion + local optimizations on !x86
11420 * - figure out how to handle decomposed branches during optimizations, ie.
11421 * compare+branch, op_jump_table+op_br etc.
11422 * - promote RuntimeXHandles to vregs
11423 * - vtype cleanups:
11424 * - add a NEW_VARLOADA_VREG macro
11425 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11426 * accessing vtype fields.
11427 * - get rid of I8CONST on 64 bit platforms
11428 * - dealing with the increase in code size due to branches created during opcode
11430 * - use extended basic blocks
11431 * - all parts of the JIT
11432 * - handle_global_vregs () && local regalloc
11433 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11434 * - sources of increase in code size:
11437 * - isinst and castclass
11438 * - lvregs not allocated to global registers even if used multiple times
11439 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11441 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11442 * - add all micro optimizations from the old JIT
11443 * - put tree optimizations into the deadce pass
11444 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11445 * specific function.
11446 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11447 * fcompare + branchCC.
11448 * - create a helper function for allocating a stack slot, taking into account
11449 * MONO_CFG_HAS_SPILLUP.
11451 * - merge the ia64 switch changes.
11452 * - optimize mono_regstate2_alloc_int/float.
11453 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11454 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11455 * parts of the tree could be separated by other instructions, killing the tree
11456 * arguments, or stores killing loads etc. Also, should we fold loads into other
11457 * instructions if the result of the load is used multiple times ?
11458 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11459 * - LAST MERGE: 108395.
11460 * - when returning vtypes in registers, generate IR and append it to the end of the
11461 * last bb instead of doing it in the epilog.
11462 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11470 - When to decompose opcodes:
11471 - earlier: this makes some optimizations hard to implement, since the low level IR
11472 no longer contains the necessary information. But it is easier to do.
11473 - later: harder to implement, enables more optimizations.
11474 - Branches inside bblocks:
11475 - created when decomposing complex opcodes.
11476 - branches to another bblock: harmless, but not tracked by the branch
11477 optimizations, so need to branch to a label at the start of the bblock.
11478 - branches to inside the same bblock: very problematic, trips up the local
11479 reg allocator. Can be fixed by splitting the current bblock, but that is a
11480 complex operation, since some local vregs can become global vregs etc.
11481 - Local/global vregs:
11482 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11483 local register allocator.
11484 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11485 structure, created by mono_create_var (). Assigned to hregs or the stack by
11486 the global register allocator.
11487 - When to do optimizations like alu->alu_imm:
11488 - earlier -> saves work later on since the IR will be smaller/simpler
11489 - later -> can work on more instructions
11490 - Handling of valuetypes:
11491 - When a vtype is pushed on the stack, a new temporary is created, an
11492 instruction computing its address (LDADDR) is emitted and pushed on
11493 the stack. Need to optimize cases when the vtype is used immediately as in
11494 argument passing, stloc etc.
11495 - Instead of the to_end stuff in the old JIT, simply call the function handling
11496 the values on the stack before emitting the last instruction of the bb.
11499 #endif /* DISABLE_JIT */