2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * that control flow proceeds to after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single join point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the join point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpose, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * Loads into @intf_reg the interface-offsets entry for @klass, given a vtable
 * pointer in @vtable_reg.  Under AOT the interface id is not a compile-time
 * constant, so a patchable ADJUSTED_IID constant is added to the vtable
 * pointer; otherwise the entry is read at a fixed negative offset.
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID: presumably pre-scaled/biased so a plain pointer add yields
 * the slot address — TODO confirm against the AOT patch resolver. */
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: entry i lives at vtable - (i + 1) * sizeof (void*). */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emits IR that sets @intf_bit_reg to a nonzero value iff the interface
 * bitmap stored at @base_reg + @offset has the bit for @klass set.
 * With COMPRESSED_INTERFACE_BITMAP the test is delegated to the
 * mono_class_interface_match icall; otherwise the byte at (iid >> 3) is
 * loaded and masked with (1 << (iid & 7)), either with runtime-computed
 * values (AOT) or compile-time constants.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
/* args [1] = interface id: patchable under AOT, constant otherwise. */
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: compute byte index and bit mask at runtime from the patched iid. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: performs the bitmap test against MonoClass.interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: performs the bitmap test against MonoVTable.interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
/*
 * On failure, branches to @false_target when one is given; otherwise emits a
 * conditional InvalidCastException (the visible lines show both forms).
 */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: interface id comes from a patchable constant. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then delegate. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then delegate. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "isinst"-style subtype test: checks whether the class in
 * @klass_reg derives from @klass by comparing the supertypes-table entry at
 * depth (klass->idepth - 1) against @klass (or against @klass_ins->dreg when
 * the class is only known at runtime).  Branches to @true_target on a match;
 * the deep-hierarchy idepth guard branches to @false_target when the
 * candidate's hierarchy is too shallow to contain @klass.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than the inline supertable need the idepth guard. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Runtime-determined target class (shared generic code). */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper with no runtime class instruction (klass_ins == NULL). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Interface cast via the vtable in @vtable_reg: guards the interface id
 * against max_interface_id, tests the interface bitmap bit, and either
 * branches (true/false targets) or throws InvalidCastException — the visible
 * lines show both outcomes; presumably selected by whether targets are
 * supplied (TODO confirm against the elided conditional).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* Same guard + bitmap-bit test as mini_emit_iface_cast, against MonoClass. */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact-class-equality check: compares the class in @klass_reg
 * against @klass_inst->dreg when the class is only known at runtime,
 * against a patchable class constant under AOT, or against the immediate
 * class pointer otherwise; throws InvalidCastException on mismatch.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper with no runtime class instruction (klass_inst == NULL). */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares the class in @klass_reg against @klass (patchable constant under
 * AOT, immediate otherwise) and branches to @target using @branch_op.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses via this wrapper. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a "castclass" check for an object whose class is in @klass_reg
 * (object pointer in @obj_reg, or -1 when no object check is wanted).
 * The visible array branch checks rank equality, then validates the element
 * class (special-casing object/Enum/enum-base and interfaces, recursing for
 * nested array types), and for rank-1 SZARRAY verifies the object has no
 * bounds (is a vector).  The non-array path does a supertypes-table check
 * like mini_emit_isninst_cast_inst, throwing InvalidCastException instead
 * of branching.  On success, control presumably continues to
 * @object_is_null / fall-through — TODO confirm against the elided lines.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* Runtime class instructions are not supported on this (array) path. */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
/* object[]: accept unless the element is an enum (parent == Enum's parent
 * selects ValueType-derived candidates for the stricter check). */
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on failure. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper with no runtime class instruction (klass_inst == NULL). */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline code that sets @size bytes at @destreg + @offset to @val.
 * Only val == 0 is supported (asserted).  Small aligned sizes use immediate
 * stores; larger sizes loop over register-sized, then shrinking, stores.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a single immediate store when size fits and alignment allows. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* 64-bit targets: align to 8 with a 4-byte store, then use 8-byte stores. */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline code copying @size bytes from @srcreg + @soffset to
 * @destreg + @doffset.  Unrolled load/store pairs, widest first; unaligned
 * copies fall back to byte-by-byte.  Size is asserted bounded to limit code
 * expansion.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Unaligned: byte loads/stores only. */
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a return @type to the corresponding call opcode, selecting the
 * indirect (_REG), virtual (_VIRT) or plain variant via @calli / @virt.
 * Byref and reference/int-sized types use the plain CALL family; I8 uses
 * LCALL, R4/R8 use FCALL, value types use VCALL.  Enums and generic
 * instances are unwrapped and re-dispatched.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type variables to their basic type first. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Byref targets accept managed pointers to the same class, or raw pointers. */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the basic type. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
/* Type variables only appear under generic sharing; treated as references. */
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* The 'this' argument (args [0]) must be an object/managed/raw pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
/* Resolve shared-generic type variables before dispatching. */
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps a *CALLVIRT opcode to the corresponding direct-call opcode. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps a *CALLVIRT opcode to the *CALL_MEMBASE opcode used for vtable calls. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Passes the IMT "method" argument to @call: either @imt_arg's value or the
 * method constant (patchable under AOT).  With a dedicated IMT register the
 * value is pinned to MONO_ARCH_IMT_REG (LLVM records the vreg instead);
 * otherwise the arch back end emits it.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg = alloc_preg (cfg);
2091 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2092 } else if (cfg->compile_aot) {
2093 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2096 MONO_INST_NEW (cfg, ins, OP_PCONST);
2097 ins->inst_p0 = call->method;
2098 ins->dreg = method_reg;
2099 MONO_ADD_INS (cfg->cbb, ins);
2103 if (COMPILE_LLVM (cfg))
2104 call->imt_arg_reg = method_reg;
2106 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2108 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo patch record from @mp and fills in its target. */
2113 static MonoJumpInfo *
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2120 ji->data.target = target;
/*
 * Creates a MonoCallInst for a call with signature @sig and arguments @args.
 * @calli / @virtual / @tail select indirect, virtual or tail-call opcodes.
 * Struct returns are routed either through cfg->vret_addr or through a
 * temporary whose address is produced by OP_OUTARG_VTRETADDR (see comment
 * below).  Soft-float targets pre-convert R4 arguments via an icall.
 * Finishes by letting LLVM or the arch back end lower the out-arguments and
 * recording param-area usage on @cfg.
 */
2125 inline static MonoCallInst *
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* First vtype-return branch: presumably the tail-call / vret_addr path —
 * the selecting condition is on elided lines; TODO confirm. */
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2185 MonoInst *in = call->args [i];
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
/* Lower out-arguments: LLVM path or arch back end (duplicated call is the
 * non-LLVM build's unconditional path on elided #else lines). */
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2211 mono_arch_emit_call (cfg, call);
2213 mono_arch_emit_call (cfg, call);
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emits an indirect call through @addr with signature @sig. */
2222 inline static MonoInst*
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/*
 * Attaches the runtime-generic-context argument to @call: pinned to
 * MONO_ARCH_RGCTX_REG when the arch has one, otherwise (presumably the LLVM
 * path on elided lines) recorded as rgctx_arg_reg — TODO confirm.
 */
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2242 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also passes a runtime-generic-context argument. */
2249 inline static MonoInst*
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh vreg before emitting the call. */
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
/* Forward declarations: rgctx slot loaders defined later in this file. */
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emits a call to @method.  @this non-NULL selects virtual dispatch.
 * Handles: string ctor signature fixup, transparent-proxy (remoting)
 * wrappers, delegate Invoke via invoke_impl, devirtualization of non-virtual
 * and sealed methods, IMT-based interface dispatch, and plain vtable-slot
 * dispatch.  Returns the call instruction.
 */
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
/* gshared + remoting: fetch the remoting-invoke wrapper address from the
 * rgctx and call indirectly, since gshared methods can't have wrappers. */
2297 g_assert (cfg->generic_sharing_context);
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final and not a remoting wrapper. */
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (faulting on null this). */
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2379 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets from the vtable pointer. */
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2387 if (slot_reg == -1) {
/* No IMT: go through the interface-offsets table instead. */
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also pass the method via the IMT argument. */
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/* Method call that additionally passes an rgctx (vtable) argument. */
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
/* Copy the rgctx value into a fresh vreg before emitting the call. */
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: uses the method's own signature, no IMT argument. */
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native function @func with signature @sig. */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Emits a call to the registered JIT icall identified by @func's address. */
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Remember the patch info so the ABS resolver can recognize this address. */
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widens the result of a call when needed: native (pinvoke) and LLVM calls
 * may return sub-register integers without defined upper bits, so an
 * explicit sign/zero extension matching the return type is inserted.
 * Returns the (possibly replaced) result instruction.
 */
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (lazily caching it) the managed memcpy helper, looked up as the
 * 3-argument "memcpy" method on corlib's String class. Aborts with
 * g_error if an old corlib without the helper is installed.
 */
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Fill *WB_BITMAP with one bit per pointer-sized slot of KLASS (relative to
 * the base OFFSET) that holds an object reference, recursing into embedded
 * value-type fields that themselves contain references. Static fields are
 * skipped; for reference classes the MonoObject header is not subtracted
 * from field offsets, for valuetypes it is.
 */
2545 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2547 MonoClassField *field;
2548 gpointer iter = NULL;
2550 while ((field = mono_class_get_fields (klass, &iter))) {
2553 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2555 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2556 if (mono_type_is_reference (field->type)) {
/* Reference slots must be pointer aligned for the bitmap encoding. */
2557 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2558 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2560 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2561 MonoClass *field_class = mono_class_from_mono_type (field->type);
2562 if (field_class->has_references)
2563 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Emit a write-barrier-aware copy of a value type of KLASS; iargs [0] and
 * iargs [1] hold the destination and source addresses. Returns a boolean
 * indicating whether the copy could be emitted inline (callers fall back to
 * a generic path otherwise). Bails out when alignment is below pointer size
 * or the size exceeds 32 pointer words; sizes above 5 pointer words are
 * handled by calling the mono_gc_wbarrier_value_copy_bitmap icall with the
 * reference bitmap instead of unrolling.
 */
2569 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2571 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2572 unsigned need_wb = 0;
2577 /*types with references can't have alignment smaller than sizeof(void*) */
2578 if (align < SIZEOF_VOID_P)
2581 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2582 if (size > 32 * SIZEOF_VOID_P)
2585 create_write_barrier_bitmap (klass, &need_wb, 0);
2587 /* We don't unroll more than 5 stores to avoid code bloat. */
2588 if (size > 5 * SIZEOF_VOID_P) {
2589 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2590 size += (SIZEOF_VOID_P - 1);
2591 size &= ~(SIZEOF_VOID_P - 1);
2593 EMIT_NEW_ICONST (cfg, iargs [2], size);
2594 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2595 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2599 destreg = iargs [0]->dreg;
2600 srcreg = iargs [1]->dreg;
2603 dest_ptr_reg = alloc_preg (cfg);
2604 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks forward through the destination one slot at a time. */
2607 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2609 while (size >= SIZEOF_VOID_P) {
2610 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2611 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* The bitmap bit for this slot says whether it holds a reference
 * and therefore needs a write barrier after the store. */
2613 if (need_wb & 0x1) {
2614 MonoInst *dummy_use;
2616 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2617 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg alive across the barrier call. */
2619 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2620 dummy_use->sreg1 = dest_ptr_reg;
2621 MONO_ADD_INS (cfg->cbb, dummy_use);
2625 offset += SIZEOF_VOID_P;
2626 size -= SIZEOF_VOID_P;
2629 /*tmp += sizeof (void*)*/
2630 if (size >= SIZEOF_VOID_P) {
2631 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2632 MONO_ADD_INS (cfg->cbb, iargs [0]);
2636 /* Those cannot be references since size < sizeof (void*) */
2638 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2653 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *
 * If NATIVE is TRUE the native (marshalled) size/layout of KLASS is used and
 * the struct is assumed to contain no references.
 */
2662 * Emit code to copy a valuetype of type @klass whose address is stored in
2663 * @src->dreg to memory whose address is stored at @dest->dreg.
2666 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2668 MonoInst *iargs [4];
2671 MonoMethod *memcpy_method;
2675 * This check breaks with spilled vars... need to handle it during verification anyway.
2676 * g_assert (klass && klass == src->klass && klass == dest->klass);
2680 n = mono_class_native_size (klass, &align);
2682 n = mono_class_value_size (klass, &align);
2684 /* if native is true there should be no references in the struct */
2685 if (cfg->gen_write_barriers && klass->has_references && !native) {
2686 /* Avoid barriers when storing to the stack */
2687 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2688 (dest->opcode == OP_LDADDR))) {
2689 int context_used = 0;
2694 if (cfg->generic_sharing_context)
2695 context_used = mono_class_check_context_used (klass);
2697 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2698 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2700 } else if (context_used) {
2701 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2703 if (cfg->compile_aot) {
2704 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2706 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2707 mono_class_compute_gc_descriptor (klass);
/* Fall back to the generic barrier-aware copy icall. */
2711 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or stack destination): a plain memcpy is enough. */
2716 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2717 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2718 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2722 EMIT_NEW_ICONST (cfg, iargs [2], n);
2724 memcpy_method = get_memcpy_method ();
2725 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (lazily caching it) the managed memset helper, looked up as the
 * 3-argument "memset" method on corlib's String class. Aborts with
 * g_error if an old corlib without the helper is installed.
 */
2730 get_memset_method (void)
2732 static MonoMethod *memset_method = NULL;
2733 if (!memset_method) {
2734 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2736 g_error ("Old corlib found. Install a new one");
2738 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg. Small types (up to 5 pointer words) are zeroed with an inline
 * memset; larger ones call the managed memset helper.
 */
2742 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2744 MonoInst *iargs [3];
2747 MonoMethod *memset_method;
2749 /* FIXME: Optimize this for the case when dest is an LDADDR */
2751 mono_class_init (klass);
2752 n = mono_class_value_size (klass, &align);
2754 if (n <= sizeof (gpointer) * 5) {
2755 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2758 memset_method = get_memset_method ();
2760 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2761 EMIT_NEW_ICONST (cfg, iargs [2], n);
2762 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD. The source
 * depends on how the method is shared:
 *  - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD): load the mrgctx
 *    from the vtable variable;
 *  - static or valuetype methods: load the vtable variable, dereferencing
 *    through MonoMethodRuntimeGenericContext.class_vtable when the method
 *    carries a method instantiation;
 *  - otherwise: load the vtable from `this'.
 * Only valid when compiling with a generic sharing context.
 */
2767 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2769 MonoInst *this = NULL;
2771 g_assert (cfg->generic_sharing_context);
2773 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2774 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2775 !method->klass->valuetype)
2776 EMIT_NEW_ARGLOAD (cfg, this, 0);
2778 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2779 MonoInst *mrgctx_loc, *mrgctx_var;
2782 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2784 mrgctx_loc = mono_get_vtable_var (cfg);
2785 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2788 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2789 MonoInst *vtable_loc, *vtable_var;
2793 vtable_loc = mono_get_vtable_var (cfg);
2794 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx: fetch the vtable out of it. */
2796 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2797 MonoInst *mrgctx_var = vtable_var;
2800 vtable_reg = alloc_preg (cfg);
2801 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2802 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from `this'. */
2808 int vtable_reg, res_reg;
2810 vtable_reg = alloc_preg (cfg);
2811 res_reg = alloc_preg (cfg);
2812 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from the mempool MP) and fill an RGCTX-entry descriptor: the
 * requesting METHOD, whether the lookup goes through an mrgctx, the embedded
 * patch info (PATCH_TYPE / PATCH_DATA identifying the target), and the kind
 * of information to fetch (INFO_TYPE).
 */
2817 static MonoJumpInfoRgctxEntry *
2818 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2820 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2821 res->method = method;
2822 res->in_mrgctx = in_mrgctx;
2823 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2824 res->data->type = patch_type;
2825 res->data->data.target = patch_data;
2826 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the RGCTX lazy-fetch trampoline that resolves ENTRY,
 * passing the (m)rgctx instruction as the single argument.
 */
2831 static inline MonoInst*
2832 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2834 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the property RGCTX_TYPE (e.g. the vtable or the klass
 * pointer) of KLASS from the runtime generic context of the current method.
 */
2838 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2839 MonoClass *klass, int rgctx_type)
2841 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2842 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2844 return emit_rgctx_fetch (cfg, rgctx, entry);
2848 * emit_get_rgctx_method:
2850 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2851 * normal constants, else emit a load from the rgctx.
2854 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2855 MonoMethod *cmethod, int rgctx_type)
2857 if (!context_used) {
/* Non-shared code: the method (or its rgctx) can be emitted as an
 * ordinary constant. */
2860 switch (rgctx_type) {
2861 case MONO_RGCTX_INFO_METHOD:
2862 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2864 case MONO_RGCTX_INFO_METHOD_RGCTX:
2865 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2868 g_assert_not_reached ();
2871 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2872 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2874 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the property RGCTX_TYPE of FIELD from the runtime generic
 * context of the current method.
 */
2879 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2880 MonoClassField *field, int rgctx_type)
2882 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2883 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2885 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class init trampoline for KLASS, obtaining the
 * vtable either from the rgctx (shared code) or as a vtable constant. On
 * architectures defining MONO_ARCH_VTABLE_REG the vtable is passed in that
 * register instead of a regular argument.
 */
2889 * On return the caller must check @klass for load errors.
2892 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2894 MonoInst *vtable_arg;
2896 int context_used = 0;
2898 if (cfg->generic_sharing_context)
2899 context_used = mono_class_check_context_used (klass);
2902 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2903 klass, MONO_RGCTX_INFO_VTABLE);
2905 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2909 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature for the same patch type. */
2912 if (COMPILE_LLVM (cfg))
2913 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2915 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2916 #ifdef MONO_ARCH_VTABLE_REG
2917 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2918 cfg->uses_vtable_reg = TRUE;
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check that OBJ is an instance of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise. Compares the object's
 * vtable (or its klass under MONO_OPT_SHARED) against the expected value,
 * with separate paths for shared code, gshared (rgctx) and AOT.
 */
2925 * On return the caller must check @array_class for load errors
2928 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2930 int vtable_reg = alloc_preg (cfg);
2931 int context_used = 0;
2933 if (cfg->generic_sharing_context)
2934 context_used = mono_class_check_context_used (array_class);
2936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code: compare the klass, since vtables are per-domain. */
2938 if (cfg->opt & MONO_OPT_SHARED) {
2939 int class_reg = alloc_preg (cfg);
2940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2941 if (cfg->compile_aot) {
2942 int klass_reg = alloc_preg (cfg);
2943 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2944 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Gshared code: fetch the expected vtable from the rgctx. */
2948 } else if (context_used) {
2949 MonoInst *vtable_ins;
2951 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2952 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2954 if (cfg->compile_aot) {
2958 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2960 vt_reg = alloc_preg (cfg);
2961 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2962 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2965 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2971 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 * When --debug=casts is active, emit code that records the source class
 * (read from the object's vtable) and the target KLASS into the JIT TLS
 * structure, so a failing cast can report both types.
 */
2975 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2977 if (mini_get_debug_options ()->better_cast_details) {
2978 int to_klass_reg = alloc_preg (cfg);
2979 int vtable_reg = alloc_preg (cfg);
2980 int klass_reg = alloc_preg (cfg);
2981 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
2984 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2988 MONO_ADD_INS (cfg->cbb, tls_get);
2989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2993 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2994 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Counterpart of save_cast_details: emit code that clears the recorded
 * cast-source field in the JIT TLS data after a successful cast.
 */
2999 reset_cast_details (MonoCompile *cfg)
3001 /* Reset the variables holding the cast details */
3002 if (mini_get_debug_options ()->better_cast_details) {
3003 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3005 MONO_ADD_INS (cfg->cbb, tls_get);
3006 /* It is enough to reset the from field */
3007 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_unbox_nullable:
 *
 * Emit a call to Nullable<T>.Unbox on VAL; under generic sharing the method
 * address and the rgctx argument are fetched from the rgctx and the call
 * goes through an indirect rgctx calli.
 */
3012 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3013 * generic code is generated.
3016 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3018 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3021 MonoInst *rgctx, *addr;
3023 /* FIXME: What if the class is shared? We might not
3024 have to get the address of the method from the
3026 addr = emit_get_rgctx_method (cfg, context_used, method,
3027 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3029 rgctx = emit_get_rgctx (cfg, method, context_used);
3031 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3033 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit IR to unbox the object in sp [0] to the value type KLASS: check that
 * the object's vtable rank is 0 and its element class matches KLASS's
 * element class (via an rgctx lookup under generic sharing), throwing
 * InvalidCastException on mismatch, then compute the address of the value
 * just past the MonoObject header.
 */
3038 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3042 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3043 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3044 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3045 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3047 obj_reg = sp [0]->dreg;
3048 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3049 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3051 /* FIXME: generics */
3052 g_assert (klass->rank == 0);
/* An array object can never be unboxed to a value type. */
3055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3056 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
3062 MonoInst *element_class;
3064 /* This assertion is from the unboxcast insn */
3065 g_assert (klass->rank == 0);
3067 element_class = emit_get_rgctx_klass (cfg, context_used,
3068 klass->element_class, MONO_RGCTX_INFO_KLASS);
3070 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3071 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3073 save_cast_details (cfg, klass->element_class, obj_reg);
3074 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3075 reset_cast_details (cfg);
/* Result address = object + sizeof (MonoObject) (skip the header). */
3078 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3079 MONO_ADD_INS (cfg->cbb, add);
3080 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 * Emit IR to allocate an instance of KLASS, choosing between: the shared
 * mono_object_new icall (MONO_OPT_SHARED), an rgctx-based path under generic
 * sharing, a corlib-token helper for out-of-line AOT code, a GC managed
 * allocator method when available, or the allocation function returned by
 * mono_class_get_allocation_ftn. FOR_BOX selects box-specific allocators.
 */
3087 * Returns NULL and set the cfg exception on error.
3090 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3092 MonoInst *iargs [2];
3098 MonoInst *iargs [2];
3101 FIXME: we cannot get managed_alloc here because we can't get
3102 the class's vtable (because it's not a closed class)
3104 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3105 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Gshared path: fetch the klass (shared) or vtable from the rgctx. */
3108 if (cfg->opt & MONO_OPT_SHARED)
3109 rgctx_info = MONO_RGCTX_INFO_KLASS;
3111 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3112 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3114 if (cfg->opt & MONO_OPT_SHARED) {
3115 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3117 alloc_ftn = mono_object_new;
3120 alloc_ftn = mono_object_new_specific;
3123 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-gshared paths follow. */
3126 if (cfg->opt & MONO_OPT_SHARED) {
3127 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3128 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3130 alloc_ftn = mono_object_new;
3131 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3132 /* This happens often in argument checking code, eg. throw new FooException... */
3133 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3134 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3135 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3137 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3138 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3142 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3143 cfg->exception_ptr = klass;
3147 #ifndef MONO_CROSS_COMPILE
3148 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3151 if (managed_alloc) {
3152 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3153 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3155 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer words as well. */
3157 guint32 lw = vtable->klass->instance_size;
3158 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3159 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3160 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3163 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3167 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 * Emit IR to box VAL into an object of type KLASS. Nullable types go
 * through Nullable<T>.Box (via an rgctx calli when shared); everything else
 * allocates the box object and stores the value past the MonoObject header.
 */
3171 * Returns NULL and set the cfg exception on error.
3174 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3176 MonoInst *alloc, *ins;
3178 if (mono_class_is_nullable (klass)) {
3179 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3182 /* FIXME: What if the class is shared? We might not
3183 have to get the method address from the RGCTX. */
3184 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3185 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3186 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3188 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3190 return mono_emit_method_call (cfg, method, &val, NULL);
3194 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3198 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* TRUE when KLASS needs the icall-based isinst/castclass path (interfaces,
 * arrays, nullables, MBR, sealed, variant generics, type vars). Currently
 * forced TRUE for every class — see the FIXME below. */
3203 // FIXME: This doesn't work yet (class libs tests fail?)
3204 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 * Emit IR implementing the castclass opcode on SRC: null passes through;
 * "complex" classes (see is_complex_isinst) go through the
 * mono_object_castclass icall; interfaces use the interface-cast helper;
 * other classes compare vtable/klass inline, throwing InvalidCastException
 * on mismatch. Under generic sharing the target klass comes from the rgctx.
 */
3210 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3212 MonoBasicBlock *is_null_bb;
3213 int obj_reg = src->dreg;
3214 int vtable_reg = alloc_preg (cfg);
3215 MonoInst *klass_inst = NULL;
3220 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3221 klass, MONO_RGCTX_INFO_KLASS);
3223 if (is_complex_isinst (klass)) {
3224 /* Complex case, handle by an icall */
3230 args [1] = klass_inst;
3232 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3234 /* Simple case, handled by the code below */
/* Casting null always succeeds: branch straight to the merge block. */
3238 NEW_BBLOCK (cfg, is_null_bb);
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3241 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3243 save_cast_details (cfg, klass, obj_reg);
3245 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3247 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3249 int klass_reg = alloc_preg (cfg);
3251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3253 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3254 /* the remoting code is broken, access the class for now */
3255 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3256 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3258 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3259 cfg->exception_ptr = klass;
3262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3264 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3267 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3269 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3270 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3274 MONO_START_BB (cfg, is_null_bb);
3276 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 * Emit IR implementing the isinst opcode on SRC: the result register holds
 * the object on success and NULL on failure. "Complex" classes (see
 * is_complex_isinst) call the mono_object_isinst icall; otherwise inline
 * checks are emitted with separate paths for interfaces, arrays (rank and
 * element-class checks, including the enum/object special cases and the
 * SZARRAY bounds check), nullables and plain classes.
 */
3285 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3288 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3289 int obj_reg = src->dreg;
3290 int vtable_reg = alloc_preg (cfg);
3291 int res_reg = alloc_preg (cfg);
3292 MonoInst *klass_inst = NULL;
3295 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3297 if (is_complex_isinst (klass)) {
3300 /* Complex case, handle by an icall */
3306 args [1] = klass_inst;
3308 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3310 /* Simple case, the code below can handle it */
3314 NEW_BBLOCK (cfg, is_null_bb);
3315 NEW_BBLOCK (cfg, false_bb);
3316 NEW_BBLOCK (cfg, end_bb);
3318 /* Do the assignment at the beginning, so the other assignment can be if converted */
3319 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3320 ins->type = STACK_OBJ;
/* isinst on null yields null, which is already in res_reg. */
3323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3328 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3329 g_assert (!context_used);
3330 /* the is_null_bb target simply copies the input register to the output */
3331 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3333 int klass_reg = alloc_preg (cfg);
/* Array case: check the rank, then the element class. */
3336 int rank_reg = alloc_preg (cfg);
3337 int eclass_reg = alloc_preg (cfg);
3339 g_assert (!context_used);
3340 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3341 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3345 if (klass->cast_class == mono_defaults.object_class) {
3346 int parent_reg = alloc_preg (cfg);
3347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3348 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3349 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3351 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3352 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3353 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3355 } else if (klass->cast_class == mono_defaults.enum_class) {
3356 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3358 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3359 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3361 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3362 /* Check that the object is a vector too */
3363 int bounds_reg = alloc_preg (cfg);
3364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3369 /* the is_null_bb target simply copies the input register to the output */
3370 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3372 } else if (mono_class_is_nullable (klass)) {
3373 g_assert (!context_used);
3374 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3375 /* the is_null_bb target simply copies the input register to the output */
3376 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3378 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3379 g_assert (!context_used);
3380 /* the remoting code is broken, access the class for now */
3381 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3382 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3384 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3385 cfg->exception_ptr = klass;
3388 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3393 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3394 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3397 /* the is_null_bb target simply copies the input register to the output */
3398 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result is NULL. */
3403 MONO_START_BB (cfg, false_bb);
3405 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3408 MONO_START_BB (cfg, is_null_bb);
3410 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 * Emit IR for the internal CISINST opcode (isinst with remoting proxy
 * support) — see the original comment below for the 0/1/2 result encoding.
 * Interfaces first try the normal interface cast, then fall back to the
 * transparent-proxy checks; other classes dereference the proxy's
 * remote_class->proxy_class before the inline isinst check.
 */
3418 /* This opcode takes as input an object reference and a class, and returns:
3419 0) if the object is an instance of the class,
3420 1) if the object is not instance of the class,
3421 2) if the object is a proxy whose type cannot be determined */
3424 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3425 int obj_reg = src->dreg;
3426 int dreg = alloc_ireg (cfg);
3428 int klass_reg = alloc_preg (cfg);
3430 NEW_BBLOCK (cfg, true_bb);
3431 NEW_BBLOCK (cfg, false_bb);
3432 NEW_BBLOCK (cfg, false2_bb);
3433 NEW_BBLOCK (cfg, end_bb);
3434 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is not an instance of anything: result 1. */
3436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3439 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3440 NEW_BBLOCK (cfg, interface_fail_bb);
3442 tmp_reg = alloc_preg (cfg);
3443 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3444 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: only a proxy with custom type info can
 * still produce the "unknown" result (2). */
3445 MONO_START_BB (cfg, interface_fail_bb);
3446 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3448 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3450 tmp_reg = alloc_preg (cfg);
3451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3455 tmp_reg = alloc_preg (cfg);
3456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: for proxies, test against the remote proxy_class. */
3459 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3460 tmp_reg = alloc_preg (cfg);
3461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3464 tmp_reg = alloc_preg (cfg);
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3469 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3472 MONO_START_BB (cfg, no_proxy_bb);
3474 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge the three outcomes into dreg: 1 = not instance, 2 = proxy
 * unknown, 0 = instance. */
3477 MONO_START_BB (cfg, false_bb);
3479 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3480 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3482 MONO_START_BB (cfg, false2_bb);
3484 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3487 MONO_START_BB (cfg, true_bb);
3489 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3491 MONO_START_BB (cfg, end_bb);
3494 MONO_INST_NEW (cfg, ins, OP_ICONST);
3496 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR for a castclass that must also cope with transparent
 * proxies (remoting).  The integer result left in DREG is described by
 * the comment below; on definite failure an InvalidCastException is
 * raised instead of producing a value.
 */
3502 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3504 /* This opcode takes as input an object reference and a class, and returns:
3505 0) if the object is an instance of the class,
3506 1) if the object is a proxy whose type cannot be determined
3507 an InvalidCastException exception is thrown otherwise*/
3510 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3511 int obj_reg = src->dreg;
3512 int dreg = alloc_ireg (cfg);
3513 int tmp_reg = alloc_preg (cfg);
3514 int klass_reg = alloc_preg (cfg);
3516 NEW_BBLOCK (cfg, end_bb);
3517 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference casts successfully: jump straight to the "0" result. */
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Remember klass/obj so the failure path can build a good exception message. */
3522 save_cast_details (cfg, klass, obj_reg);
/* Interface target: try the fast interface-table check first. */
3524 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3525 NEW_BBLOCK (cfg, interface_fail_bb);
3527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3528 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3529 MONO_START_BB (cfg, interface_fail_bb);
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* If the iface check failed and this is not a proxy, the cast throws here. */
3532 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3534 tmp_reg = alloc_preg (cfg);
3535 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info: cannot verify the cast -> throw. */
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (undecidable at JIT time). */
3539 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3540 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: distinguish proxy vs. plain object. */
3543 NEW_BBLOCK (cfg, no_proxy_bb);
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3547 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: check against the remoted class instead. */
3549 tmp_reg = alloc_preg (cfg);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3553 tmp_reg = alloc_preg (cfg);
3554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* No custom type info: treat like a normal object of the proxied class. */
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3558 NEW_BBLOCK (cfg, fail_1_bb);
3560 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3562 MONO_START_BB (cfg, fail_1_bb);
/* isinst failed on a proxy with custom info: result 1. */
3564 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3567 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass which throws on failure. */
3569 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3572 MONO_START_BB (cfg, ok_result_bb);
3574 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3576 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
3579 MONO_INST_NEW (cfg, ins, OP_ICONST);
3581 ins->type = STACK_I4;
3587 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit the IR equivalent of mono_delegate_ctor (): allocate the
 * delegate object of type KLASS, store its target/method/invoke_impl
 * fields, and (when possible) a per-domain code slot used by the
 * delegate trampoline.  Returns the new delegate instance.
 * Per the comment above this function, returns NULL and sets the cfg
 * exception on error.
 */
3589 static G_GNUC_UNUSED MonoInst*
3590 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3592 gpointer *trampoline;
3593 MonoInst *obj, *method_ins, *tramp_ins;
3597 obj = handle_alloc (cfg, klass, FALSE, 0);
3601 /* Inline the contents of mono_delegate_ctor */
3603 /* Set target field */
3604 /* Optimize away setting of NULL target */
3605 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3608 /* Set method field */
/* context_used selects between a direct MonoMethod* and an rgctx fetch. */
3609 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3613 * To avoid looking up the compiled code belonging to the target method
3614 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3615 * store it, and we fill it after the method has been compiled.
/* Dynamic methods can be freed, so they don't get a cached code slot;
 * AOT uses the rgctx path instead of a baked-in pointer. */
3617 if (!cfg->compile_aot && !method->dynamic) {
3618 MonoInst *code_slot_ins;
3621 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3623 domain = mono_domain_get ();
/* The method_code_hash is shared per-domain state: lock around lookup/insert. */
3624 mono_domain_lock (domain);
3625 if (!domain_jit_info (domain)->method_code_hash)
3626 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3627 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3629 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3630 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3632 mono_domain_unlock (domain);
3634 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3639 /* Set invoke_impl field */
3640 if (cfg->compile_aot) {
/* AOT: the trampoline address is only known at load time, emit a patch. */
3641 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3643 trampoline = mono_create_delegate_trampoline (klass);
3644 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3648 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, passing the dimension
 * arguments from SP.  Returns the call instruction.
 */
3654 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3656 MonoJitICallInfo *info;
3658 /* Need to register the icall so it gets an icall wrapper */
3659 info = mono_get_array_new_va_icall (rank);
3661 cfg->flags |= MONO_CFG_HAS_VARARGS;
3663 /* mono_array_new_va () needs a vararg calling convention */
3664 cfg->disable_llvm = TRUE;
3666 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3667 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the compile unit uses a GOT variable and it has not been set up
 * yet, emit an OP_LOAD_GOTADDR at the very start of the entry basic
 * block to initialize it, plus a dummy use in the exit block so the
 * variable stays live for the whole method.
 */
3671 mono_emit_load_got_addr (MonoCompile *cfg)
3673 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT var, and never do it twice. */
3675 if (!cfg->got_var || cfg->got_var_allocated)
3678 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3679 getaddr->dreg = cfg->got_var->dreg;
3681 /* Add it to the start of the first bblock */
3682 if (cfg->bb_entry->code) {
/* Prepend by hand so it executes before any existing instruction. */
3683 getaddr->next = cfg->bb_entry->code;
3684 cfg->bb_entry->code = getaddr;
3687 MONO_ADD_INS (cfg->bb_entry, getaddr);
3689 cfg->got_var_allocated = TRUE;
3692 * Add a dummy use to keep the got_var alive, since real uses might
3693 * only be generated by the back ends.
3694 * Add it to end_bblock, so the variable's lifetime covers the whole
3696 * It would be better to make the usage of the got var explicit in all
3697 * cases when the backend needs it (i.e. calls, throw etc.), so this
3698 * wouldn't be needed.
3700 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3701 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size eligible for inlining; initialized lazily in
 * mono_method_check_inlining () from MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT. */
3704 static int inline_limit;
3705 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled
 * in CFG.  Returns FALSE for generic sharing, excessive inline depth,
 * noinline/synchronized/marshalbyref methods, bodies over the size
 * limit, classes whose cctor state cannot be settled, methods with
 * declarative security, and (on soft-float targets) R4 args/returns.
 */
3708 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3710 MonoMethodHeaderSummary header;
3712 #ifdef MONO_ARCH_SOFT_FLOAT
3713 MonoMethodSignature *sig = mono_method_signature (method);
3717 if (cfg->generic_sharing_context)
3720 if (cfg->inline_depth > 10)
3723 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, some icalls/pinvokes can be inlined as well. */
3724 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3725 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3726 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Summary avoids fully decoding the method header just for a size check. */
3731 if (!mono_method_get_header_summary (method, &header))
3734 /*runtime, icall and pinvoke are checked by summary call*/
3735 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3736 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3737 (method->klass->marshalbyref) ||
3741 /* also consider num_locals? */
3742 /* Do the size check early to avoid creating vtables */
3743 if (!inline_limit_inited) {
3744 if (getenv ("MONO_INLINELIMIT"))
3745 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3747 inline_limit = INLINE_LENGTH_LIMIT;
3748 inline_limit_inited = TRUE;
3750 if (header.code_size >= inline_limit)
3754 * if we can initialize the class of the method right away, we do,
3755 * otherwise we don't allow inlining if the class needs initialization,
3756 * since it would mean inserting a call to mono_runtime_class_init()
3757 * inside the inlined code
3759 if (!(cfg->opt & MONO_OPT_SHARED)) {
3760 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3761 if (cfg->run_cctors && method->klass->has_cctor) {
3762 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3763 if (!method->klass->runtime_info)
3764 /* No vtable created yet */
3766 vtable = mono_class_vtable (cfg->domain, method->klass);
3769 /* This makes so that inline cannot trigger */
3770 /* .cctors: too many apps depend on them */
3771 /* running with a specific order... */
3772 if (! vtable->initialized)
3774 mono_runtime_class_init (vtable);
3776 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3777 if (!method->klass->runtime_info)
3778 /* No vtable created yet */
3780 vtable = mono_class_vtable (cfg->domain, method->klass);
3783 if (!vtable->initialized)
3788 * If we're compiling for shared code
3789 * the cctor will need to be run at aot method load time, for example,
3790 * or at the end of the compilation of the inlining method.
3792 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3797 * CAS - do not inline methods with declarative security
3798 * Note: this has to be before any possible return TRUE;
3800 if (mono_method_has_declsec (method))
3803 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods touching R4 values. */
3805 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3807 for (i = 0; i < sig->param_count; ++i)
3808 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD needs the class
 * constructor of VTABLE's class to be triggered first.  Already
 * initialized classes (outside AOT), beforefieldinit classes, and
 * accesses from the class's own instance methods do not.
 */
3816 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3818 if (vtable->initialized && !cfg->compile_aot)
3821 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3824 if (!mono_class_needs_cctor_run (vtable->klass, method))
3827 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3828 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional
 * array with element class KLASS.  When BCHECK is set a bounds check
 * against MonoArray.max_length is emitted first.  Returns the
 * instruction holding the element address.
 */
3835 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3839 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3841 mono_class_init (klass);
3842 size = mono_class_array_element_size (klass);
3844 mult_reg = alloc_preg (cfg);
3845 array_reg = arr->dreg;
3846 index_reg = index->dreg;
3848 #if SIZEOF_REGISTER == 8
3849 /* The array reg is 64 bits but the index reg is only 32 */
3850 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
3852 index2_reg = index_reg;
3854 index2_reg = alloc_preg (cfg);
3855 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3858 if (index->type == STACK_I8) {
/* 32 bit target with a 64 bit index: truncate it. */
3859 index2_reg = alloc_preg (cfg);
3860 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3862 index2_reg = index_reg;
3867 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3869 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64. */
3870 if (size == 1 || size == 2 || size == 4 || size == 8) {
3871 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3873 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3874 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3880 add_reg = alloc_preg (cfg);
3882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3883 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3884 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3885 ins->type = STACK_PTR;
3886 MONO_ADD_INS (cfg->cbb, ins);
3891 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a two-dimensional array element
 * ARR [INDEX1, INDEX2] with element class KLASS, including per-dimension
 * range checks against the MonoArrayBounds entries.  Only compiled on
 * targets with native mul/div (it relies on pointer multiplies).
 */
3893 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3895 int bounds_reg = alloc_preg (cfg);
3896 int add_reg = alloc_preg (cfg);
3897 int mult_reg = alloc_preg (cfg);
3898 int mult2_reg = alloc_preg (cfg);
3899 int low1_reg = alloc_preg (cfg);
3900 int low2_reg = alloc_preg (cfg);
3901 int high1_reg = alloc_preg (cfg);
3902 int high2_reg = alloc_preg (cfg);
3903 int realidx1_reg = alloc_preg (cfg);
3904 int realidx2_reg = alloc_preg (cfg);
3905 int sum_reg = alloc_preg (cfg);
3910 mono_class_init (klass);
3911 size = mono_class_array_element_size (klass);
3913 index1 = index_ins1->dreg;
3914 index2 = index_ins2->dreg;
3916 /* range checking */
3917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3918 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned-compare
 * against length (LE_UN catches both negative and too-large indices). */
3920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3921 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3922 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3923 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3924 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3925 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3926 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, reading the second MonoArrayBounds entry. */
3928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3929 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3930 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3931 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3932 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3933 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3934 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector). */
3936 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3937 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3939 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3940 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3942 ins->type = STACK_MP;
3944 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch an Array Address/Get/Set helper by rank: rank 1 and
 * (optionally) rank 2 get inline address computations, everything else
 * calls the marshalled array-address wrapper.  IS_SET adjusts for the
 * extra value argument of setters.
 */
3951 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3955 MonoMethod *addr_method;
3958 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3961 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3963 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3964 /* emit_ldelema_2 depends on OP_LMUL */
3965 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3966 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address wrapper method. */
3970 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3971 addr_method = mono_marshal_get_array_address (rank, element_size);
3972 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
3977 static MonoBreakPolicy
3978 always_insert_breakpoint (MonoMethod *method)
3980 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
3983 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3986 * mono_set_break_policy:
3987 * policy_callback: the new callback function
3989 * Allow embedders to decide whether to actually obey breakpoint instructions
3990 * (both break IL instructions and Debugger.Break () method calls), for example
3991 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3992 * untrusted or semi-trusted code.
3994 * @policy_callback will be called every time a break point instruction needs to
3995 * be inserted with the method argument being the method that calls Debugger.Break()
3996 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3997 * if it wants the breakpoint to not be effective in the given method.
3998 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the
 * default always-insert behavior.  See the comment above. */
4001 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4003 if (policy_callback)
4004 break_policy_func = policy_callback;
4006 break_policy_func = always_insert_breakpoint;
/* Ask the installed break policy whether METHOD should get a real
 * breakpoint instruction.  (The misspelled name is historical and is
 * referenced elsewhere in this file.) */
4010 should_insert_brekpoint (MonoMethod *method) {
4011 switch (break_policy_func (method)) {
4012 case MONO_BREAK_POLICY_ALWAYS:
4014 case MONO_BREAK_POLICY_NEVER:
4016 case MONO_BREAK_POLICY_ON_DBG:
/* Only break when running under the Mono debugger. */
4017 return mono_debug_using_mono_debugger ();
4019 g_warning ("Incorrect value returned from break policy callback");
4024 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* args [0] = array, args [1] = index, args [2] = value location;
 * IS_SET chooses between storing into and loading out of the array. */
4026 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4028 MonoInst *addr, *store, *load;
4029 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4031 /* the bounds check is already done by the callers */
4032 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the element address. */
4034 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4035 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Get: copy the element into *args [2]. */
4037 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4038 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Intrinsic hook for constructor calls: currently only gives the SIMD
 * intrinsics code a chance to replace the ctor.  Returns NULL when no
 * intrinsic applies. */
4044 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4046 MonoInst *ins = NULL;
4047 #ifdef MONO_ARCH_SIMD_INTRINSICS
4048 if (cfg->opt & MONO_OPT_SIMD) {
4049 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an "intrinsic").
 * Recognizes a fixed set of corlib methods (String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Environment,
 * Math) and falls through to the arch-specific hook otherwise.
 * Returns the replacement instruction, or NULL to emit a normal call.
 */
4059 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4061 MonoInst *ins = NULL;
/* Cached lazily; RuntimeHelpers has no mono_defaults entry. */
4063 static MonoClass *runtime_helpers_class = NULL;
4064 if (! runtime_helpers_class)
4065 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4066 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String ---- */
4068 if (cmethod->klass == mono_defaults.string_class) {
4069 if (strcmp (cmethod->name, "get_Chars") == 0) {
4070 int dreg = alloc_ireg (cfg);
4071 int index_reg = alloc_preg (cfg);
4072 int mult_reg = alloc_preg (cfg);
4073 int add_reg = alloc_preg (cfg);
4075 #if SIZEOF_REGISTER == 8
4076 /* The array reg is 64 bits but the index reg is only 32 */
4077 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4079 index_reg = args [1]->dreg;
4081 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4083 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Use a scaled LEA on x86/amd64 (chars are 2 bytes -> shift 1). */
4084 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4085 add_reg = ins->dreg;
4086 /* Avoid a warning */
4088 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4092 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4093 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4094 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4096 type_from_op (ins, NULL, NULL);
4098 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4099 int dreg = alloc_ireg (cfg);
4100 /* Decompose later to allow more optimizations */
4101 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4102 ins->type = STACK_I4;
4103 cfg->cbb->has_array_access = TRUE;
4104 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4107 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4108 int mult_reg = alloc_preg (cfg);
4109 int add_reg = alloc_preg (cfg);
4111 /* The corlib functions check for oob already. */
4112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4113 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4115 return cfg->cbb->last_ins;
/* ---- System.Object ---- */
4118 } else if (cmethod->klass == mono_defaults.object_class) {
4120 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj->vtable->type, with a null check (FAULT variant). */
4121 int dreg = alloc_preg (cfg);
4122 int vt_reg = alloc_preg (cfg);
4123 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4124 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4125 type_from_op (ins, NULL, NULL);
/* Hash from the object address; invalid with a moving collector. */
4128 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
4129 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4130 int dreg = alloc_ireg (cfg);
4131 int t1 = alloc_ireg (cfg);
4133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4134 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4135 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
4139 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4140 MONO_INST_NEW (cfg, ins, OP_NOP);
4141 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array ---- */
4145 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
4146 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4147 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4148 if (cmethod->name [0] != 'g')
4151 if (strcmp (cmethod->name, "get_Rank") == 0) {
4152 int dreg = alloc_ireg (cfg);
4153 int vtable_reg = alloc_preg (cfg);
4154 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4155 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4156 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4157 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4158 type_from_op (ins, NULL, NULL);
4161 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4162 int dreg = alloc_ireg (cfg);
4164 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4165 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4166 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4171 } else if (cmethod->klass == runtime_helpers_class) {
4173 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4174 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4178 } else if (cmethod->klass == mono_defaults.thread_class) {
4179 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4180 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4181 MONO_ADD_INS (cfg->cbb, ins);
4183 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4184 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4185 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor ---- */
4188 } else if (cmethod->klass == mono_defaults.monitor_class) {
4189 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4190 if (strcmp (cmethod->name, "Enter") == 0) {
4193 if (COMPILE_LLVM (cfg)) {
4195 * Pass the argument normally, the LLVM backend will handle the
4196 * calling convention problems.
4198 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: pass the object in the dedicated monitor register. */
4200 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4201 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4202 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4203 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4206 return (MonoInst*)call;
4207 } else if (strcmp (cmethod->name, "Exit") == 0) {
4210 if (COMPILE_LLVM (cfg)) {
4211 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4213 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4214 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4215 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4216 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4219 return (MonoInst*)call;
4221 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4222 MonoMethod *fast_method = NULL;
4224 /* Avoid infinite recursion */
4225 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4226 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4227 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4230 if (strcmp (cmethod->name, "Enter") == 0 ||
4231 strcmp (cmethod->name, "Exit") == 0)
4232 fast_method = mono_monitor_get_fast_path (cmethod);
4236 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4238 } else if (cmethod->klass->image == mono_defaults.corlib &&
4239 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4240 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4243 #if SIZEOF_REGISTER == 8
4244 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4245 /* 64 bit reads are already atomic */
4246 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4247 ins->dreg = mono_alloc_preg (cfg);
4248 ins->inst_basereg = args [0]->dreg;
4249 ins->inst_offset = 0;
4250 MONO_ADD_INS (cfg->cbb, ins);
4254 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic-add of +1/-1. */
4255 if (strcmp (cmethod->name, "Increment") == 0) {
4256 MonoInst *ins_iconst;
4259 if (fsig->params [0]->type == MONO_TYPE_I4)
4260 opcode = OP_ATOMIC_ADD_NEW_I4;
4261 #if SIZEOF_REGISTER == 8
4262 else if (fsig->params [0]->type == MONO_TYPE_I8)
4263 opcode = OP_ATOMIC_ADD_NEW_I8;
4266 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4267 ins_iconst->inst_c0 = 1;
4268 ins_iconst->dreg = mono_alloc_ireg (cfg);
4269 MONO_ADD_INS (cfg->cbb, ins_iconst);
4271 MONO_INST_NEW (cfg, ins, opcode);
4272 ins->dreg = mono_alloc_ireg (cfg);
4273 ins->inst_basereg = args [0]->dreg;
4274 ins->inst_offset = 0;
4275 ins->sreg2 = ins_iconst->dreg;
4276 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4277 MONO_ADD_INS (cfg->cbb, ins);
4279 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4280 MonoInst *ins_iconst;
4283 if (fsig->params [0]->type == MONO_TYPE_I4)
4284 opcode = OP_ATOMIC_ADD_NEW_I4;
4285 #if SIZEOF_REGISTER == 8
4286 else if (fsig->params [0]->type == MONO_TYPE_I8)
4287 opcode = OP_ATOMIC_ADD_NEW_I8;
4290 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4291 ins_iconst->inst_c0 = -1;
4292 ins_iconst->dreg = mono_alloc_ireg (cfg);
4293 MONO_ADD_INS (cfg->cbb, ins_iconst);
4295 MONO_INST_NEW (cfg, ins, opcode);
4296 ins->dreg = mono_alloc_ireg (cfg);
4297 ins->inst_basereg = args [0]->dreg;
4298 ins->inst_offset = 0;
4299 ins->sreg2 = ins_iconst->dreg;
4300 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4301 MONO_ADD_INS (cfg->cbb, ins);
4303 } else if (strcmp (cmethod->name, "Add") == 0) {
4306 if (fsig->params [0]->type == MONO_TYPE_I4)
4307 opcode = OP_ATOMIC_ADD_NEW_I4;
4308 #if SIZEOF_REGISTER == 8
4309 else if (fsig->params [0]->type == MONO_TYPE_I8)
4310 opcode = OP_ATOMIC_ADD_NEW_I8;
4314 MONO_INST_NEW (cfg, ins, opcode);
4315 ins->dreg = mono_alloc_ireg (cfg);
4316 ins->inst_basereg = args [0]->dreg;
4317 ins->inst_offset = 0;
4318 ins->sreg2 = args [1]->dreg;
4319 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4320 MONO_ADD_INS (cfg->cbb, ins);
4323 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4325 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4326 if (strcmp (cmethod->name, "Exchange") == 0) {
4328 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4330 if (fsig->params [0]->type == MONO_TYPE_I4)
4331 opcode = OP_ATOMIC_EXCHANGE_I4;
4332 #if SIZEOF_REGISTER == 8
4333 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4334 (fsig->params [0]->type == MONO_TYPE_I))
4335 opcode = OP_ATOMIC_EXCHANGE_I8;
4337 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4338 opcode = OP_ATOMIC_EXCHANGE_I4;
4343 MONO_INST_NEW (cfg, ins, opcode);
4344 ins->dreg = mono_alloc_ireg (cfg);
4345 ins->inst_basereg = args [0]->dreg;
4346 ins->inst_offset = 0;
4347 ins->sreg2 = args [1]->dreg;
4348 MONO_ADD_INS (cfg->cbb, ins);
4350 switch (fsig->params [0]->type) {
4352 ins->type = STACK_I4;
4356 ins->type = STACK_I8;
4358 case MONO_TYPE_OBJECT:
4359 ins->type = STACK_OBJ;
4362 g_assert_not_reached ();
/* Storing a reference needs a write barrier for generational GC. */
4365 if (cfg->gen_write_barriers && is_ref) {
4366 MonoInst *dummy_use;
4367 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4368 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4369 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4372 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4374 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4375 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4377 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4378 if (fsig->params [1]->type == MONO_TYPE_I4)
4380 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4381 size = sizeof (gpointer);
4382 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4385 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4386 ins->dreg = alloc_ireg (cfg);
4387 ins->sreg1 = args [0]->dreg;
4388 ins->sreg2 = args [1]->dreg;
4389 ins->sreg3 = args [2]->dreg;
4390 ins->type = STACK_I4;
4391 MONO_ADD_INS (cfg->cbb, ins);
4392 } else if (size == 8) {
4393 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4394 ins->dreg = alloc_ireg (cfg);
4395 ins->sreg1 = args [0]->dreg;
4396 ins->sreg2 = args [1]->dreg;
4397 ins->sreg3 = args [2]->dreg;
4398 ins->type = STACK_I8;
4399 MONO_ADD_INS (cfg->cbb, ins);
4401 /* g_assert_not_reached (); */
4403 if (cfg->gen_write_barriers && is_ref) {
4404 MonoInst *dummy_use;
4405 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4406 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4407 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4410 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib (Debugger.Break, Environment) ---- */
4414 } else if (cmethod->klass->image == mono_defaults.corlib) {
4415 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4416 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4417 if (should_insert_brekpoint (cfg->method))
4418 MONO_INST_NEW (cfg, ins, OP_BREAK);
4420 MONO_INST_NEW (cfg, ins, OP_NOP);
4421 MONO_ADD_INS (cfg->cbb, ins);
/* Platform is known at JIT time, so fold this to a constant. */
4424 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4425 && strcmp (cmethod->klass->name, "Environment") == 0) {
4427 EMIT_NEW_ICONST (cfg, ins, 1);
4429 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math ---- */
4433 } else if (cmethod->klass == mono_defaults.math_class) {
4435 * There is general branches code for Min/Max, but it does not work for
4437 * http://everything2.com/?node_id=1051618
4441 #ifdef MONO_ARCH_SIMD_INTRINSICS
4442 if (cfg->opt & MONO_OPT_SIMD) {
4443 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to architecture-specific intrinsics. */
4449 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4453 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations;
 * currently only String.InternalAllocateStr is rerouted to the managed
 * GC allocator (skipped when allocation profiling is active, which
 * needs to see the original allocation path).  Returns the replacement
 * call or NULL to leave the call alone.
 */
4456 inline static MonoInst*
4457 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4458 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4460 if (method->klass == mono_defaults.string_class) {
4461 /* managed string allocation support */
4462 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4463 MonoInst *iargs [2];
4464 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4465 MonoMethod *managed_alloc = NULL;
4467 g_assert (vtable); /* Should not fail since it is System.String */
4468 #ifndef MONO_CROSS_COMPILE
4469 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Managed allocator takes (vtable, length). */
4473 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4474 iargs [1] = args [0];
4475 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create a local variable for each argument of SIG
 * (including the implicit this) and store the corresponding stack
 * value from SP into it, so the inlined body can address its arguments
 * as ordinary vars.
 */
4482 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4484 MonoInst *store, *temp;
4487 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its type comes from the stack value. */
4488 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4491 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4492 * would be different than the MonoInst's used to represent arguments, and
4493 * the ldelema implementation can't deal with that.
4494 * Solution: When ldelema is used on an inline argument, create a var for
4495 * it, emit ldelema on that var, and emit the saving code below in
4496 * inline_method () if needed.
4498 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4499 cfg->args [i] = temp;
4500 /* This uses cfg->args [i] which is set by the preceding line */
4501 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4502 store->cil_code = sp [0]->cil_code;
4507 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4508 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4510 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/* Debug helper: only allow inlining of callees whose full name starts
 * with the prefix in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  An empty or
 * unset variable allows everything. */
4512 check_inline_called_method_name_limit (MonoMethod *called_method)
4515 static char *limit = NULL;
/* Cache the env var on first use; "" means "no limit configured". */
4517 if (limit == NULL) {
4518 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4520 if (limit_string != NULL)
4521 limit = limit_string;
4523 limit = (char *) "";
4526 if (limit [0] != '\0') {
4527 char *called_method_name = mono_method_full_name (called_method, TRUE);
4529 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4530 g_free (called_method_name);
4532 //return (strncmp_result <= 0);
4533 return (strncmp_result == 0);
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit () but
 * keyed on the *caller*: inlining is permitted only when the caller's full
 * name starts with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * The limit is read once and cached in a static.  Returns TRUE when
 * inlining is permitted.
 */
4540 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4542 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4545 	static char *limit = NULL;
4547 	if (limit == NULL) {
4548 		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4549 		if (limit_string != NULL) {
4550 			limit = limit_string;
4552 			limit = (char *) "";
4556 	if (limit [0] != '\0') {
4557 		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		/* prefix comparison only, same policy as the called-method limit */
4559 		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4560 		g_free (caller_method_name);
4562 		//return (strncmp_result <= 0);
4563 		return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  The heavy lifting
 * is a recursive call to mono_method_to_ir () which emits the callee's body
 * between a fresh start bblock (sbblock) and end bblock (ebblock); the cost
 * it returns decides whether the inline is kept (cost < 60, or
 * inline_allways) or aborted and rolled back.  All of the cfg-> state that
 * the recursive translation overwrites (args, locals, cbb, cil offsets,
 * generic context, ...) is saved in prev_* locals up front and restored
 * afterwards, so this function leaves cfg exactly as it found it apart from
 * the newly emitted code.
 *
 * Returns: on success, presumably the cost consumed (the return statements
 * fall in gaps of this listing — confirm against the full source); 0 when
 * the inline was aborted.
 * NOTE(review): this listing is missing many original lines (closing braces,
 * some returns, switch labels); comments below only describe visible code.
 */
4571 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4572 	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4574 	MonoInst *ins, *rvar = NULL;
4575 	MonoMethodHeader *cheader;
4576 	MonoBasicBlock *ebblock, *sbblock;
4578 	MonoMethod *prev_inlined_method;
4579 	MonoInst **prev_locals, **prev_args;
4580 	MonoType **prev_arg_types;
4581 	guint prev_real_offset;
4582 	GHashTable *prev_cbb_hash;
4583 	MonoBasicBlock **prev_cil_offset_to_bb;
4584 	MonoBasicBlock *prev_cbb;
4585 	unsigned char* prev_cil_start;
4586 	guint32 prev_cil_offset_to_bb_len;
4587 	MonoMethod *prev_current_method;
4588 	MonoGenericContext *prev_generic_context;
4589 	gboolean ret_var_set, prev_ret_var_set;
4591 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* optional env-var based filters on what may be inlined (debugging aid) */
4593 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4594 	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4597 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4598 	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4602 	if (cfg->verbose_level > 2)
4603 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4605 	if (!cmethod->inline_info) {
4606 		mono_jit_stats.inlineable_methods++;
4607 		cmethod->inline_info = 1;
4610 	/* allocate local variables */
4611 	cheader = mono_method_get_header (cmethod);
4613 	if (cheader == NULL || mono_loader_get_last_error ()) {
		/* header load failed: free it and clear the loader error before bailing */
4615 		mono_metadata_free_mh (cheader);
4616 		mono_loader_clear_error ();
4620 	/* allocate space to store the return value */
4621 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4622 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4626 	prev_locals = cfg->locals;
4627 	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4628 	for (i = 0; i < cheader->num_locals; ++i)
4629 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4631 	/* allocate start and end blocks */
4632 	/* This is needed so if the inline is aborted, we can clean up */
4633 	NEW_BBLOCK (cfg, sbblock);
4634 	sbblock->real_offset = real_offset;
4636 	NEW_BBLOCK (cfg, ebblock);
4637 	ebblock->block_num = cfg->num_bblocks++;
4638 	ebblock->real_offset = real_offset;
	/* save every piece of cfg state the recursive translation clobbers */
4640 	prev_args = cfg->args;
4641 	prev_arg_types = cfg->arg_types;
4642 	prev_inlined_method = cfg->inlined_method;
4643 	cfg->inlined_method = cmethod;
4644 	cfg->ret_var_set = FALSE;
4645 	cfg->inline_depth ++;
4646 	prev_real_offset = cfg->real_offset;
4647 	prev_cbb_hash = cfg->cbb_hash;
4648 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4649 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4650 	prev_cil_start = cfg->cil_start;
4651 	prev_cbb = cfg->cbb;
4652 	prev_current_method = cfg->current_method;
4653 	prev_generic_context = cfg->generic_context;
4654 	prev_ret_var_set = cfg->ret_var_set;
	/* recursively translate the callee's IL between sbblock and ebblock */
4656 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4658 	ret_var_set = cfg->ret_var_set;
	/* restore all saved cfg state regardless of success or failure */
4660 	cfg->inlined_method = prev_inlined_method;
4661 	cfg->real_offset = prev_real_offset;
4662 	cfg->cbb_hash = prev_cbb_hash;
4663 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4664 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4665 	cfg->cil_start = prev_cil_start;
4666 	cfg->locals = prev_locals;
4667 	cfg->args = prev_args;
4668 	cfg->arg_types = prev_arg_types;
4669 	cfg->current_method = prev_current_method;
4670 	cfg->generic_context = prev_generic_context;
4671 	cfg->ret_var_set = prev_ret_var_set;
4672 	cfg->inline_depth --;
	/* negative cost means the translation aborted; 60 is the acceptance threshold */
4674 	if ((costs >= 0 && costs < 60) || inline_allways) {
4675 		if (cfg->verbose_level > 2)
4676 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4678 		mono_jit_stats.inlined_methods++;
4680 		/* always add some code to avoid block split failures */
4681 		MONO_INST_NEW (cfg, ins, OP_NOP);
4682 		MONO_ADD_INS (prev_cbb, ins);
4684 		prev_cbb->next_bb = sbblock;
4685 		link_bblock (cfg, prev_cbb, sbblock);
4688 		 * Get rid of the begin and end bblocks if possible to aid local
4691 		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4693 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4694 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4696 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4697 			MonoBasicBlock *prev = ebblock->in_bb [0];
4698 			mono_merge_basic_blocks (cfg, prev, ebblock);
4700 			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4701 				mono_merge_basic_blocks (cfg, prev_cbb, prev);
4702 				cfg->cbb = prev_cbb;
4710 		 * If the inlined method contains only a throw, then the ret var is not
4711 		 * set, so set it to a dummy value.
4714 			static double r8_0 = 0.0;
			/* pick the dummy constant matching the stack type of the ret var */
4716 			switch (rvar->type) {
4718 				MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4721 				MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4726 				MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4729 				MONO_INST_NEW (cfg, ins, OP_R8CONST);
4730 				ins->type = STACK_R8;
4731 				ins->inst_p0 = (void*)&r8_0;
4732 				ins->dreg = rvar->dreg;
4733 				MONO_ADD_INS (cfg->cbb, ins);
4736 				MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4739 				g_assert_not_reached ();
4743 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		/* cheader must stay alive as long as the compile; freed with the cfg */
4746 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4749 		if (cfg->verbose_level > 2)
4750 			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4751 		cfg->exception_type = MONO_EXCEPTION_NONE;
4752 		mono_loader_clear_error ();
4754 		/* This gets rid of the newly added bblocks */
4755 		cfg->cbb = prev_cbb;
4757 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4762 * Some of these comments may well be out-of-date.
4763 * Design decisions: we do a single pass over the IL code (and we do bblock
4764 * splitting/merging in the few cases when it's required: a back jump to an IL
4765 * address that was not already seen as bblock starting point).
4766 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4767 * Complex operations are decomposed in simpler ones right away. We need to let the
4768 * arch-specific code peek and poke inside this process somehow (except when the
4769 * optimizations can take advantage of the full semantic info of coarse opcodes).
4770 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4771 * MonoInst->opcode initially is the IL opcode or some simplification of that
4772 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4773 * opcode with value bigger than OP_LAST.
4774 * At this point the IR can be handed over to an interpreter, a dumb code generator
4775 * or to the optimizing code generator that will translate it to SSA form.
4777 * Profiling directed optimizations.
4778 * We may compile by default with few or no optimizations and instrument the code
4779 * or the user may indicate what methods to optimize the most either in a config file
4780 * or through repeated runs where the compiler applies offline the optimizations to
4781 * each method and then decides if it was worth it.
4784 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4785 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4786 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4787 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4788 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4789 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4790 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4791 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4793 /* offset from br.s -> br like opcodes */
4794 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the CIL address IP can be considered part of basic
 * block BB: either no bblock starts at IP (cfg->cil_offset_to_bb entry is
 * NULL) or the bblock starting there is BB itself.
 */
4797 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4799 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4801 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [start, end): walk every instruction, decode
 * its operand kind via mono_opcodes [], and create basic blocks (GET_BBLOCK)
 * at every branch target and at the instruction following a branch/switch.
 * Branch targets are computed per ECMA-335: for short branches the offset is
 * a signed byte relative to the next instruction (cli_addr + 2), for long
 * branches a signed 32-bit value relative to cli_addr + 5.  Basic blocks
 * ending in CEE_THROW are marked out_of_line.
 * NOTE(review): error-return lines and some case labels fall in gaps of
 * this listing; *pos presumably reports the failing ip on error — confirm
 * against the full source.
 */
4805 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4807 	unsigned char *ip = start;
4808 	unsigned char *target;
4811 	MonoBasicBlock *bblock;
4812 	const MonoOpcode *opcode;
4815 		cli_addr = ip - start;
4816 		i = mono_opcode_value ((const guint8 **)&ip, end);
4819 		opcode = &mono_opcodes [i];
4820 		switch (opcode->argument) {
4821 		case MonoInlineNone:
4824 		case MonoInlineString:
4825 		case MonoInlineType:
4826 		case MonoInlineField:
4827 		case MonoInlineMethod:
4830 		case MonoShortInlineR:
4837 		case MonoShortInlineVar:
4838 		case MonoShortInlineI:
4841 		case MonoShortInlineBrTarget:
			/* 1-byte signed offset relative to the end of the 2-byte instruction */
4842 			target = start + cli_addr + 2 + (signed char)ip [1];
4843 			GET_BBLOCK (cfg, bblock, target);
4846 			GET_BBLOCK (cfg, bblock, ip);
4848 		case MonoInlineBrTarget:
			/* 4-byte signed offset relative to the end of the 5-byte instruction */
4849 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4850 			GET_BBLOCK (cfg, bblock, target);
4853 			GET_BBLOCK (cfg, bblock, ip);
4855 		case MonoInlineSwitch: {
4856 			guint32 n = read32 (ip + 1);
			/* switch targets are relative to the end of the whole jump table */
4859 			cli_addr += 5 + 4 * n;
4860 			target = start + cli_addr;
4861 			GET_BBLOCK (cfg, bblock, target);
4863 			for (j = 0; j < n; ++j) {
4864 				target = start + cli_addr + (gint32)read32 (ip);
4865 				GET_BBLOCK (cfg, bblock, target);
4875 			g_assert_not_reached ();
4878 		if (i == CEE_THROW) {
4879 			unsigned char *bb_start = ip - 1;
4881 			/* Find the start of the bblock containing the throw */
4883 			while ((bb_start >= start) && !bblock) {
4884 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4888 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 * methods the token indexes the wrapper's own data table; otherwise the
 * token is looked up in M's image with the given class/generic context.
 * "allow_open" refers to the fact that, unlike mini_get_method (), no
 * check for open constructed types is performed here.
 */
4897 static inline MonoMethod *
4898 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4902 	if (m->wrapper_type != MONO_WRAPPER_NONE)
4903 		return mono_method_get_wrapper_data (m, token);
4905 	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with a
 * generic sharing context, additionally detects methods whose declaring
 * class is still an open constructed type (the handling lines fall in a
 * gap of this listing — presumably the method is rejected; confirm
 * against the full source).
 */
4910 static inline MonoMethod *
4911 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4913 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4915 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper
 * methods look the token up in their wrapper data, everything else goes
 * through mono_class_get_full () on the method's image.  The resolved
 * class is initialized before use.
 */
4921 static inline MonoClass*
4922 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4926 	if (method->wrapper_type != MONO_WRAPPER_NONE)
4927 		klass = mono_method_get_wrapper_data (method, token);
4929 		klass = mono_class_get_full (method->klass->image, token, context);
4931 		mono_class_init (klass);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands declared on CALLEE when it is being called
 * (or inlined) from CALLER.  An ECMA link demand is satisfied by emitting
 * a call that raises a SecurityException at the call site; any other
 * failure records MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg (unless an
 * earlier exception is already pending).
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
4940 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* when inlining (cfg->method != caller), declsec on the callee forbids it */
4944 	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4948 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4949 	if (result == MONO_JIT_SECURITY_OK)
4952 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
4953 		/* Generate code to throw a SecurityException before the actual call/link */
4954 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4957 		NEW_ICONST (cfg, args [0], 4);
4958 		NEW_METHODCONST (cfg, args [1], caller);
4959 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4960 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4961 		/* don't hide previous results */
4962 		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4963 		cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Lazily look up and cache (in a function-static) the managed
 * SecurityManager.ThrowException (exception) method used to raise
 * security exceptions from JITted code.
 */
4971 throw_exception (void)
4973 	static MonoMethod *method = NULL;
4976 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
4977 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 * emission point, i.e. make the generated code throw the pre-built
 * exception object EX when executed.
 */
4984 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4986 	MonoMethod *thrower = throw_exception ();
4989 	EMIT_NEW_PCONST (cfg, args [0], ex);
4990 	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
4998 get_original_method (MonoMethod *method)
5000 	if (method->wrapper_type == MONO_WRAPPER_NONE)
5003 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5004 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5007 	/* in other cases we need to find the original method */
5008 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: verify that CALLER (unwrapped via
 * get_original_method, since wrappers carry no attributes) may access
 * FIELD; on violation, emit code that throws the returned exception.
 */
5012 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5013 					  MonoBasicBlock *bblock, unsigned char *ip)
5015 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
5016 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5018 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, companion of
 * ensure_method_is_allowed_to_access_field (): verify that CALLER
 * (unwrapped first) may call CALLEE; on violation, emit code that throws
 * the returned exception.
 */
5022 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5023 					 MonoBasicBlock *bblock, unsigned char *ip)
5025 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
5026 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5028 		emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * The recognized pattern (after the newarr) is:
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray (...)
 * For AOT compilation the field RVA is returned instead of a direct data
 * pointer so the lookup can happen at load time.
 * NOTE(review): several lines (element-size selection per MONO_TYPE_*,
 * some early returns) fall in gaps of this listing.
 */
5036 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5039 	 * newarr[System.Int32]
5041 	 * ldtoken field valuetype ...
5042 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip [5] == 0x4 checks the token-type byte of the ldtoken operand */
5044 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5045 		guint32 token = read32 (ip + 7);
5046 		guint32 field_token = read32 (ip + 2);
5047 		guint32 field_index = field_token & 0xffffff;
5049 		const char *data_ptr;
5051 		MonoMethod *cmethod;
5052 		MonoClass *dummy_class;
5053 		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5059 		*out_field_token = field_token;
5061 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* only recognize corlib's RuntimeHelpers.InitializeArray */
5064 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5066 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5067 		case MONO_TYPE_BOOLEAN:
5071 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5072 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5073 		case MONO_TYPE_CHAR:
5083 			return NULL; /* stupid ARM FP swapped format */
5093 		if (size > mono_type_size (field->type, &dummy_align))
5096 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5097 		if (!method->klass->image->dynamic) {
5098 			field_index = read32 (ip + 2) & 0xffffff;
5099 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5100 			data_ptr = mono_image_rva_map (method->klass->image, rva);
5101 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5102 			/* for aot code we do the lookup on load */
5103 			if (aot && data_ptr)
5104 				return GUINT_TO_POINTER (rva);
5106 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5108 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the IL at IP in METHOD.
 * The message includes the method's full name and either the offending
 * disassembled instruction or "method body is empty." for empty bodies.
 * The header obtained here is kept alive via cfg->headers_to_free so the
 * disassembled text stays valid until the cfg is destroyed.
 */
5116 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5118 	char *method_fname = mono_method_full_name (method, TRUE);
5120 	MonoMethodHeader *header = mono_method_get_header (method);
5122 	if (header->code_size == 0)
5123 		method_code = g_strdup ("method body is empty.");
5125 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5126  	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5127  	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5128  	g_free (method_fname);
5129  	g_free (method_code);
5130 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG
 * (MONO_EXCEPTION_OBJECT_SUPPLIED).  cfg->exception_ptr is registered as
 * a GC root first so the exception object is not collected while the
 * cfg holds the only reference.
 */
5134 set_exception_object (MonoCompile *cfg, MonoException *exception)
5136 	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5137 	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5138 	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE when KLASS is a reference type, resolving generic type
 * variables through the shared-generic context first (so a type variable
 * constrained/instantiated to a reference type is answered correctly
 * under generic sharing).
 */
5142 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5146 	if (cfg->generic_sharing_context)
5147 		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5149 		type = &klass->byval_arg;
5150 	return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N.  When the store would
 * be a plain reg-reg move and *SP is the last emitted instruction and a
 * simple constant load (OP_ICONST/OP_I8CONST), the move is elided by
 * retargeting the constant's destination register directly at the local;
 * otherwise a regular LOCSTORE is emitted.
 */
5154 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5157 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5158 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5159 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5160 		/* Optimize reg-reg moves away */
5162 		 * Can't optimize other opcodes, since sp[0] might point to
5163 		 * the last ins of a decomposed opcode.
5165 		sp [0]->dreg = (cfg)->locals [n]->dreg;
5167 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: here an "ldloca N; initobj <T>" pair is rewritten into a direct
 * zeroing of local N (NULL store for reference types, VZERO for value
 * types), avoiding taking the local's address at all.
 * Returns the new ip past the consumed instructions (presumably NULL when
 * the pattern does not match — the return lines fall in gaps of this
 * listing; confirm against the full source).
 */
5175 static inline unsigned char *
5176 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5185 		local = read16 (ip + 2);
	/* the following INITOBJ must be in the same bblock for the rewrite to be safe */
5189 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5190 		gboolean skip = FALSE;
5192 		/* From the INITOBJ case */
5193 		token = read32 (ip + 2);
5194 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5195 		CHECK_TYPELOAD (klass);
5196 		if (generic_class_is_reference_type (cfg, klass)) {
5197 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5198 		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5199 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5200 		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5201 			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk up the inheritance chain of CLASS and return TRUE when it
 * derives from (or is) System.Exception (mono_defaults.exception_class).
 */
5214 is_exception_class (MonoClass *class)
5217 		if (class == mono_defaults.exception_class)
5219 		class = class->parent;
5225 * mono_method_to_ir:
5227 * Translate the .net IL into linear IR.
5230 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5231 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5232 guint inline_offset, gboolean is_virtual_call)
5235 MonoInst *ins, **sp, **stack_start;
5236 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5237 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5238 MonoMethod *cmethod, *method_definition;
5239 MonoInst **arg_array;
5240 MonoMethodHeader *header;
5242 guint32 token, ins_flag;
5244 MonoClass *constrained_call = NULL;
5245 unsigned char *ip, *end, *target, *err_pos;
5246 static double r8_0 = 0.0;
5247 MonoMethodSignature *sig;
5248 MonoGenericContext *generic_context = NULL;
5249 MonoGenericContainer *generic_container = NULL;
5250 MonoType **param_types;
5251 int i, n, start_new_bblock, dreg;
5252 int num_calls = 0, inline_costs = 0;
5253 int breakpoint_id = 0;
5255 MonoBoolean security, pinvoke;
5256 MonoSecurityManager* secman = NULL;
5257 MonoDeclSecurityActions actions;
5258 GSList *class_inits = NULL;
5259 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5261 gboolean init_locals, seq_points, skip_dead_blocks;
5263 /* serialization and xdomain stuff may need access to private fields and methods */
5264 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5265 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5266 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5267 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5268 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5269 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5271 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5273 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5274 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5275 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5276 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5278 image = method->klass->image;
5279 header = mono_method_get_header (method);
5281 MonoLoaderError *error;
5283 if ((error = mono_loader_get_last_error ())) {
5284 cfg->exception_type = error->exception_type;
5286 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5287 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5289 goto exception_exit;
5291 generic_container = mono_method_get_generic_container (method);
5292 sig = mono_method_signature (method);
5293 num_args = sig->hasthis + sig->param_count;
5294 ip = (unsigned char*)header->code;
5295 cfg->cil_start = ip;
5296 end = ip + header->code_size;
5297 mono_jit_stats.cil_code_size += header->code_size;
5298 init_locals = header->init_locals;
5300 seq_points = cfg->gen_seq_points && cfg->method == method;
5303 * Methods without init_locals set could cause asserts in various passes
5308 method_definition = method;
5309 while (method_definition->is_inflated) {
5310 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5311 method_definition = imethod->declaring;
5314 /* SkipVerification is not allowed if core-clr is enabled */
5315 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5317 dont_verify_stloc = TRUE;
5320 if (!dont_verify && mini_method_verify (cfg, method_definition))
5321 goto exception_exit;
5323 if (mono_debug_using_mono_debugger ())
5324 cfg->keep_cil_nops = TRUE;
5326 if (sig->is_inflated)
5327 generic_context = mono_method_get_context (method);
5328 else if (generic_container)
5329 generic_context = &generic_container->context;
5330 cfg->generic_context = generic_context;
5332 if (!cfg->generic_sharing_context)
5333 g_assert (!sig->has_type_parameters);
5335 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5336 g_assert (method->is_inflated);
5337 g_assert (mono_method_get_context (method)->method_inst);
5339 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5340 g_assert (sig->generic_param_count);
5342 if (cfg->method == method) {
5343 cfg->real_offset = 0;
5345 cfg->real_offset = inline_offset;
5348 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5349 cfg->cil_offset_to_bb_len = header->code_size;
5351 cfg->current_method = method;
5353 if (cfg->verbose_level > 2)
5354 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5356 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5358 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5359 for (n = 0; n < sig->param_count; ++n)
5360 param_types [n + sig->hasthis] = sig->params [n];
5361 cfg->arg_types = param_types;
5363 dont_inline = g_list_prepend (dont_inline, method);
5364 if (cfg->method == method) {
5366 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5367 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5370 NEW_BBLOCK (cfg, start_bblock);
5371 cfg->bb_entry = start_bblock;
5372 start_bblock->cil_code = NULL;
5373 start_bblock->cil_length = 0;
5376 NEW_BBLOCK (cfg, end_bblock);
5377 cfg->bb_exit = end_bblock;
5378 end_bblock->cil_code = NULL;
5379 end_bblock->cil_length = 0;
5380 g_assert (cfg->num_bblocks == 2);
5382 arg_array = cfg->args;
5384 if (header->num_clauses) {
5385 cfg->spvars = g_hash_table_new (NULL, NULL);
5386 cfg->exvars = g_hash_table_new (NULL, NULL);
5388 /* handle exception clauses */
5389 for (i = 0; i < header->num_clauses; ++i) {
5390 MonoBasicBlock *try_bb;
5391 MonoExceptionClause *clause = &header->clauses [i];
5392 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5393 try_bb->real_offset = clause->try_offset;
5394 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5395 tblock->real_offset = clause->handler_offset;
5396 tblock->flags |= BB_EXCEPTION_HANDLER;
5398 link_bblock (cfg, try_bb, tblock);
5400 if (*(ip + clause->handler_offset) == CEE_POP)
5401 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5403 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5404 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5405 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5406 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5407 MONO_ADD_INS (tblock, ins);
5409 /* todo: is a fault block unsafe to optimize? */
5410 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5411 tblock->flags |= BB_EXCEPTION_UNSAFE;
5415 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5417 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5419 /* catch and filter blocks get the exception object on the stack */
5420 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5421 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5422 MonoInst *dummy_use;
5424 /* mostly like handle_stack_args (), but just sets the input args */
5425 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5426 tblock->in_scount = 1;
5427 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5428 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5431 * Add a dummy use for the exvar so its liveness info will be
5435 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5437 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5438 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5439 tblock->flags |= BB_EXCEPTION_HANDLER;
5440 tblock->real_offset = clause->data.filter_offset;
5441 tblock->in_scount = 1;
5442 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5443 /* The filter block shares the exvar with the handler block */
5444 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5445 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5446 MONO_ADD_INS (tblock, ins);
5450 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5451 clause->data.catch_class &&
5452 cfg->generic_sharing_context &&
5453 mono_class_check_context_used (clause->data.catch_class)) {
5455 * In shared generic code with catch
5456 * clauses containing type variables
5457 * the exception handling code has to
5458 * be able to get to the rgctx.
5459 * Therefore we have to make sure that
5460 * the vtable/mrgctx argument (for
5461 * static or generic methods) or the
5462 * "this" argument (for non-static
5463 * methods) are live.
5465 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5466 mini_method_get_context (method)->method_inst ||
5467 method->klass->valuetype) {
5468 mono_get_vtable_var (cfg);
5470 MonoInst *dummy_use;
5472 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5477 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5478 cfg->cbb = start_bblock;
5479 cfg->args = arg_array;
5480 mono_save_args (cfg, sig, inline_args);
5483 /* FIRST CODE BLOCK */
5484 NEW_BBLOCK (cfg, bblock);
5485 bblock->cil_code = ip;
5489 ADD_BBLOCK (cfg, bblock);
5491 if (cfg->method == method) {
5492 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5493 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5494 MONO_INST_NEW (cfg, ins, OP_BREAK);
5495 MONO_ADD_INS (bblock, ins);
5499 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5500 secman = mono_security_manager_get_methods ();
5502 security = (secman && mono_method_has_declsec (method));
5503 /* at this point having security doesn't mean we have any code to generate */
5504 if (security && (cfg->method == method)) {
5505 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5506 * And we do not want to enter the next section (with allocation) if we
5507 * have nothing to generate */
5508 security = mono_declsec_get_demands (method, &actions);
5511 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5512 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5514 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5515 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5516 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5518 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5519 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5523 mono_custom_attrs_free (custom);
5526 custom = mono_custom_attrs_from_class (wrapped->klass);
5527 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5531 mono_custom_attrs_free (custom);
5534 /* not a P/Invoke after all */
5539 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5540 /* we use a separate basic block for the initialization code */
5541 NEW_BBLOCK (cfg, init_localsbb);
5542 cfg->bb_init = init_localsbb;
5543 init_localsbb->real_offset = cfg->real_offset;
5544 start_bblock->next_bb = init_localsbb;
5545 init_localsbb->next_bb = bblock;
5546 link_bblock (cfg, start_bblock, init_localsbb);
5547 link_bblock (cfg, init_localsbb, bblock);
5549 cfg->cbb = init_localsbb;
5551 start_bblock->next_bb = bblock;
5552 link_bblock (cfg, start_bblock, bblock);
5555 /* at this point we know, if security is TRUE, that some code needs to be generated */
5556 if (security && (cfg->method == method)) {
5559 mono_jit_stats.cas_demand_generation++;
5561 if (actions.demand.blob) {
5562 /* Add code for SecurityAction.Demand */
5563 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5564 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5565 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5566 mono_emit_method_call (cfg, secman->demand, args, NULL);
5568 if (actions.noncasdemand.blob) {
5569 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5570 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5571 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5572 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5573 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5574 mono_emit_method_call (cfg, secman->demand, args, NULL);
5576 if (actions.demandchoice.blob) {
5577 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5578 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5579 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5580 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5581 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5585 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5587 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5590 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5591 /* check if this is native code, e.g. an icall or a p/invoke */
5592 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5593 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5595 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5596 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5598 /* if this is a native call then it can only be JITted from platform code */
5599 if ((icall || pinvk) && method->klass && method->klass->image) {
5600 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5601 MonoException *ex = icall ? mono_get_exception_security () :
5602 mono_get_exception_method_access ();
5603 emit_throw_exception (cfg, ex);
5610 if (header->code_size == 0)
5613 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5618 if (cfg->method == method)
5619 mono_debug_init_method (cfg, bblock, breakpoint_id);
5621 for (n = 0; n < header->num_locals; ++n) {
5622 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5627 /* We force the vtable variable here for all shared methods
5628 for the possibility that they might show up in a stack
5629 trace where their exact instantiation is needed. */
5630 if (cfg->generic_sharing_context && method == cfg->method) {
5631 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5632 mini_method_get_context (method)->method_inst ||
5633 method->klass->valuetype) {
5634 mono_get_vtable_var (cfg);
5636 /* FIXME: Is there a better way to do this?
5637 We need the variable live for the duration
5638 of the whole method. */
5639 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5643 /* add a check for this != NULL to inlined methods */
5644 if (is_virtual_call) {
5647 NEW_ARGLOAD (cfg, arg_ins, 0);
5648 MONO_ADD_INS (cfg->cbb, arg_ins);
5649 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5652 skip_dead_blocks = !dont_verify;
5653 if (skip_dead_blocks) {
5654 original_bb = bb = mono_basic_block_split (method, &error);
5655 if (!mono_error_ok (&error)) {
5656 mono_error_cleanup (&error);
5662 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5663 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5666 start_new_bblock = 0;
5669 if (cfg->method == method)
5670 cfg->real_offset = ip - header->code;
5672 cfg->real_offset = inline_offset;
5677 if (start_new_bblock) {
5678 bblock->cil_length = ip - bblock->cil_code;
5679 if (start_new_bblock == 2) {
5680 g_assert (ip == tblock->cil_code);
5682 GET_BBLOCK (cfg, tblock, ip);
5684 bblock->next_bb = tblock;
5687 start_new_bblock = 0;
5688 for (i = 0; i < bblock->in_scount; ++i) {
5689 if (cfg->verbose_level > 3)
5690 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5691 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5695 g_slist_free (class_inits);
5698 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5699 link_bblock (cfg, bblock, tblock);
5700 if (sp != stack_start) {
5701 handle_stack_args (cfg, stack_start, sp - stack_start);
5703 CHECK_UNVERIFIABLE (cfg);
5705 bblock->next_bb = tblock;
5708 for (i = 0; i < bblock->in_scount; ++i) {
5709 if (cfg->verbose_level > 3)
5710 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5711 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5714 g_slist_free (class_inits);
5719 if (skip_dead_blocks) {
5720 int ip_offset = ip - header->code;
5722 if (ip_offset == bb->end)
5726 int op_size = mono_opcode_size (ip, end);
5727 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5729 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5731 if (ip_offset + op_size == bb->end) {
5732 MONO_INST_NEW (cfg, ins, OP_NOP);
5733 MONO_ADD_INS (bblock, ins);
5734 start_new_bblock = 1;
5742 * Sequence points are points where the debugger can place a breakpoint.
5743 * Currently, we generate these automatically at points where the IL
5746 if (seq_points && sp == stack_start) {
5747 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5748 MONO_ADD_INS (cfg->cbb, ins);
5751 bblock->real_offset = cfg->real_offset;
5753 if ((cfg->method == method) && cfg->coverage_info) {
5754 guint32 cil_offset = ip - header->code;
5755 cfg->coverage_info->data [cil_offset].cil_code = ip;
5757 /* TODO: Use an increment here */
5758 #if defined(TARGET_X86)
5759 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5760 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5762 MONO_ADD_INS (cfg->cbb, ins);
5764 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5765 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5769 if (cfg->verbose_level > 3)
5770 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5774 if (cfg->keep_cil_nops)
5775 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5777 MONO_INST_NEW (cfg, ins, OP_NOP);
5779 MONO_ADD_INS (bblock, ins);
5782 if (should_insert_brekpoint (cfg->method))
5783 MONO_INST_NEW (cfg, ins, OP_BREAK);
5785 MONO_INST_NEW (cfg, ins, OP_NOP);
5787 MONO_ADD_INS (bblock, ins);
5793 CHECK_STACK_OVF (1);
5794 n = (*ip)-CEE_LDARG_0;
5796 EMIT_NEW_ARGLOAD (cfg, ins, n);
5804 CHECK_STACK_OVF (1);
5805 n = (*ip)-CEE_LDLOC_0;
5807 EMIT_NEW_LOCLOAD (cfg, ins, n);
5816 n = (*ip)-CEE_STLOC_0;
5819 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5821 emit_stloc_ir (cfg, sp, header, n);
5828 CHECK_STACK_OVF (1);
5831 EMIT_NEW_ARGLOAD (cfg, ins, n);
5837 CHECK_STACK_OVF (1);
5840 NEW_ARGLOADA (cfg, ins, n);
5841 MONO_ADD_INS (cfg->cbb, ins);
5851 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5853 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5858 CHECK_STACK_OVF (1);
5861 EMIT_NEW_LOCLOAD (cfg, ins, n);
5865 case CEE_LDLOCA_S: {
5866 unsigned char *tmp_ip;
5868 CHECK_STACK_OVF (1);
5869 CHECK_LOCAL (ip [1]);
5871 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5877 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5886 CHECK_LOCAL (ip [1]);
5887 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5889 emit_stloc_ir (cfg, sp, header, ip [1]);
5894 CHECK_STACK_OVF (1);
5895 EMIT_NEW_PCONST (cfg, ins, NULL);
5896 ins->type = STACK_OBJ;
5901 CHECK_STACK_OVF (1);
5902 EMIT_NEW_ICONST (cfg, ins, -1);
5915 CHECK_STACK_OVF (1);
5916 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5922 CHECK_STACK_OVF (1);
5924 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5930 CHECK_STACK_OVF (1);
5931 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5937 CHECK_STACK_OVF (1);
5938 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5939 ins->type = STACK_I8;
5940 ins->dreg = alloc_dreg (cfg, STACK_I8);
5942 ins->inst_l = (gint64)read64 (ip);
5943 MONO_ADD_INS (bblock, ins);
5949 gboolean use_aotconst = FALSE;
5951 #ifdef TARGET_POWERPC
5952 /* FIXME: Clean this up */
5953 if (cfg->compile_aot)
5954 use_aotconst = TRUE;
5957 /* FIXME: we should really allocate this only late in the compilation process */
5958 f = mono_domain_alloc (cfg->domain, sizeof (float));
5960 CHECK_STACK_OVF (1);
5966 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5968 dreg = alloc_freg (cfg);
5969 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5970 ins->type = STACK_R8;
5972 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5973 ins->type = STACK_R8;
5974 ins->dreg = alloc_dreg (cfg, STACK_R8);
5976 MONO_ADD_INS (bblock, ins);
5986 gboolean use_aotconst = FALSE;
5988 #ifdef TARGET_POWERPC
5989 /* FIXME: Clean this up */
5990 if (cfg->compile_aot)
5991 use_aotconst = TRUE;
5994 /* FIXME: we should really allocate this only late in the compilation process */
5995 d = mono_domain_alloc (cfg->domain, sizeof (double));
5997 CHECK_STACK_OVF (1);
6003 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6005 dreg = alloc_freg (cfg);
6006 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6007 ins->type = STACK_R8;
6009 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6010 ins->type = STACK_R8;
6011 ins->dreg = alloc_dreg (cfg, STACK_R8);
6013 MONO_ADD_INS (bblock, ins);
6022 MonoInst *temp, *store;
6024 CHECK_STACK_OVF (1);
6028 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6029 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6031 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6034 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6047 if (sp [0]->type == STACK_R8)
6048 /* we need to pop the value from the x86 FP stack */
6049 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6058 if (stack_start != sp)
6060 token = read32 (ip + 1);
6061 /* FIXME: check the signature matches */
6062 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6067 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6068 GENERIC_SHARING_FAILURE (CEE_JMP);
6070 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6071 CHECK_CFG_EXCEPTION;
6073 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6075 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6078 /* Handle tail calls similarly to calls */
6079 n = fsig->param_count + fsig->hasthis;
6081 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6082 call->method = cmethod;
6083 call->tail_call = TRUE;
6084 call->signature = mono_method_signature (cmethod);
6085 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6086 call->inst.inst_p0 = cmethod;
6087 for (i = 0; i < n; ++i)
6088 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6090 mono_arch_emit_call (cfg, call);
6091 MONO_ADD_INS (bblock, (MonoInst*)call);
6094 for (i = 0; i < num_args; ++i)
6095 /* Prevent arguments from being optimized away */
6096 arg_array [i]->flags |= MONO_INST_VOLATILE;
6098 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6099 ins = (MonoInst*)call;
6100 ins->inst_p0 = cmethod;
6101 MONO_ADD_INS (bblock, ins);
6105 start_new_bblock = 1;
6110 case CEE_CALLVIRT: {
6111 MonoInst *addr = NULL;
6112 MonoMethodSignature *fsig = NULL;
6114 int virtual = *ip == CEE_CALLVIRT;
6115 int calli = *ip == CEE_CALLI;
6116 gboolean pass_imt_from_rgctx = FALSE;
6117 MonoInst *imt_arg = NULL;
6118 gboolean pass_vtable = FALSE;
6119 gboolean pass_mrgctx = FALSE;
6120 MonoInst *vtable_arg = NULL;
6121 gboolean check_this = FALSE;
6122 gboolean supported_tail_call = FALSE;
6125 token = read32 (ip + 1);
6132 if (method->wrapper_type != MONO_WRAPPER_NONE)
6133 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6135 fsig = mono_metadata_parse_signature (image, token);
6137 n = fsig->param_count + fsig->hasthis;
6139 if (method->dynamic && fsig->pinvoke) {
6143 * This is a call through a function pointer using a pinvoke
6144 * signature. Have to create a wrapper and call that instead.
6145 * FIXME: This is very slow, need to create a wrapper at JIT time
6146 * instead based on the signature.
6148 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6149 EMIT_NEW_PCONST (cfg, args [1], fsig);
6151 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6154 MonoMethod *cil_method;
6156 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6157 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6158 cil_method = cmethod;
6159 } else if (constrained_call) {
6160 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6162 * This is needed since get_method_constrained can't find
6163 * the method in klass representing a type var.
6164 * The type var is guaranteed to be a reference type in this
6167 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6168 cil_method = cmethod;
6169 g_assert (!cmethod->klass->valuetype);
6171 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6174 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6175 cil_method = cmethod;
6180 if (!dont_verify && !cfg->skip_visibility) {
6181 MonoMethod *target_method = cil_method;
6182 if (method->is_inflated) {
6183 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6185 if (!mono_method_can_access_method (method_definition, target_method) &&
6186 !mono_method_can_access_method (method, cil_method))
6187 METHOD_ACCESS_FAILURE;
6190 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6191 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6193 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6194 /* MS.NET seems to silently convert this to a callvirt */
6199 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6200 * converts to a callvirt.
6202 * tests/bug-515884.il is an example of this behavior
6204 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6205 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6206 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6210 if (!cmethod->klass->inited)
6211 if (!mono_class_init (cmethod->klass))
6214 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6215 mini_class_is_system_array (cmethod->klass)) {
6216 array_rank = cmethod->klass->rank;
6217 fsig = mono_method_signature (cmethod);
6219 fsig = mono_method_signature (cmethod);
6224 if (fsig->pinvoke) {
6225 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6226 check_for_pending_exc, FALSE);
6227 fsig = mono_method_signature (wrapper);
6228 } else if (constrained_call) {
6229 fsig = mono_method_signature (cmethod);
6231 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6235 mono_save_token_info (cfg, image, token, cil_method);
6237 n = fsig->param_count + fsig->hasthis;
6239 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6240 if (check_linkdemand (cfg, method, cmethod))
6242 CHECK_CFG_EXCEPTION;
6245 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6246 g_assert_not_reached ();
6249 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6252 if (!cfg->generic_sharing_context && cmethod)
6253 g_assert (!mono_method_check_context_used (cmethod));
6257 //g_assert (!virtual || fsig->hasthis);
6261 if (constrained_call) {
6263 * We have the `constrained.' prefix opcode.
6265 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6267 * The type parameter is instantiated as a valuetype,
6268 * but that type doesn't override the method we're
6269 * calling, so we need to box `this'.
6271 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6272 ins->klass = constrained_call;
6273 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6274 CHECK_CFG_EXCEPTION;
6275 } else if (!constrained_call->valuetype) {
6276 int dreg = alloc_preg (cfg);
6279 * The type parameter is instantiated as a reference
6280 * type. We have a managed pointer on the stack, so
6281 * we need to dereference it here.
6283 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6284 ins->type = STACK_OBJ;
6286 } else if (cmethod->klass->valuetype)
6288 constrained_call = NULL;
6291 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6295 * If the callee is a shared method, then its static cctor
6296 * might not get called after the call was patched.
6298 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6299 emit_generic_class_init (cfg, cmethod->klass);
6300 CHECK_TYPELOAD (cmethod->klass);
6303 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6304 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6305 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6306 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6307 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6310 * Pass vtable iff target method might
6311 * be shared, which means that sharing
6312 * is enabled for its class and its
6313 * context is sharable (and it's not a
6316 if (sharing_enabled && context_sharable &&
6317 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6321 if (cmethod && mini_method_get_context (cmethod) &&
6322 mini_method_get_context (cmethod)->method_inst) {
6323 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6324 MonoGenericContext *context = mini_method_get_context (cmethod);
6325 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6327 g_assert (!pass_vtable);
6329 if (sharing_enabled && context_sharable)
6333 if (cfg->generic_sharing_context && cmethod) {
6334 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6336 context_used = mono_method_check_context_used (cmethod);
6338 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6339 /* Generic method interface
6340 calls are resolved via a
6341 helper function and don't
6343 if (!cmethod_context || !cmethod_context->method_inst)
6344 pass_imt_from_rgctx = TRUE;
6348 * If a shared method calls another
6349 * shared method then the caller must
6350 * have a generic sharing context
6351 * because the magic trampoline
6352 * requires it. FIXME: We shouldn't
6353 * have to force the vtable/mrgctx
6354 * variable here. Instead there
6355 * should be a flag in the cfg to
6356 * request a generic sharing context.
6359 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6360 mono_get_vtable_var (cfg);
6365 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6367 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6369 CHECK_TYPELOAD (cmethod->klass);
6370 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6375 g_assert (!vtable_arg);
6377 if (!cfg->compile_aot) {
6379 * emit_get_rgctx_method () calls mono_class_vtable () so check
6380 * for type load errors before.
6382 mono_class_setup_vtable (cmethod->klass);
6383 CHECK_TYPELOAD (cmethod->klass);
6386 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6388 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6389 MONO_METHOD_IS_FINAL (cmethod)) {
6396 if (pass_imt_from_rgctx) {
6397 g_assert (!pass_vtable);
6400 imt_arg = emit_get_rgctx_method (cfg, context_used,
6401 cmethod, MONO_RGCTX_INFO_METHOD);
6405 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6407 /* Calling virtual generic methods */
6408 if (cmethod && virtual &&
6409 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6410 !(MONO_METHOD_IS_FINAL (cmethod) &&
6411 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6412 mono_method_signature (cmethod)->generic_param_count) {
6413 MonoInst *this_temp, *this_arg_temp, *store;
6414 MonoInst *iargs [4];
6416 g_assert (mono_method_signature (cmethod)->is_inflated);
6418 /* Prevent inlining of methods that contain indirect calls */
6421 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6422 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6423 g_assert (!imt_arg);
6425 g_assert (cmethod->is_inflated);
6426 imt_arg = emit_get_rgctx_method (cfg, context_used,
6427 cmethod, MONO_RGCTX_INFO_METHOD);
6428 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6432 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6433 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6434 MONO_ADD_INS (bblock, store);
6436 /* FIXME: This should be a managed pointer */
6437 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6439 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6440 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6441 cmethod, MONO_RGCTX_INFO_METHOD);
6442 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6443 addr = mono_emit_jit_icall (cfg,
6444 mono_helper_compile_generic_method, iargs);
6446 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6448 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6451 if (!MONO_TYPE_IS_VOID (fsig->ret))
6452 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6454 CHECK_CFG_EXCEPTION;
6461 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6462 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6464 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6468 /* FIXME: runtime generic context pointer for jumps? */
6469 /* FIXME: handle this for generic sharing eventually */
6470 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6473 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6476 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6477 /* Handle tail calls similarly to calls */
6478 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6480 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6481 call->tail_call = TRUE;
6482 call->method = cmethod;
6483 call->signature = mono_method_signature (cmethod);
6486 * We implement tail calls by storing the actual arguments into the
6487 * argument variables, then emitting a CEE_JMP.
6489 for (i = 0; i < n; ++i) {
6490 /* Prevent argument from being register allocated */
6491 arg_array [i]->flags |= MONO_INST_VOLATILE;
6492 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6496 ins = (MonoInst*)call;
6497 ins->inst_p0 = cmethod;
6498 ins->inst_p1 = arg_array [0];
6499 MONO_ADD_INS (bblock, ins);
6500 link_bblock (cfg, bblock, end_bblock);
6501 start_new_bblock = 1;
6503 CHECK_CFG_EXCEPTION;
6505 /* skip CEE_RET as well */
6511 /* Conversion to a JIT intrinsic */
6512 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6513 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6514 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6519 CHECK_CFG_EXCEPTION;
6527 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6528 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6529 mono_method_check_inlining (cfg, cmethod) &&
6530 !g_list_find (dont_inline, cmethod)) {
6532 gboolean allways = FALSE;
6534 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6535 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6536 /* Prevent inlining of methods that call wrappers */
6538 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6542 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6544 cfg->real_offset += 5;
6547 if (!MONO_TYPE_IS_VOID (fsig->ret))
6548 /* *sp is already set by inline_method */
6551 inline_costs += costs;
6557 inline_costs += 10 * num_calls++;
6559 /* Tail recursion elimination */
6560 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6561 gboolean has_vtargs = FALSE;
6564 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6567 /* keep it simple */
6568 for (i = fsig->param_count - 1; i >= 0; i--) {
6569 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6574 for (i = 0; i < n; ++i)
6575 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6576 MONO_INST_NEW (cfg, ins, OP_BR);
6577 MONO_ADD_INS (bblock, ins);
6578 tblock = start_bblock->out_bb [0];
6579 link_bblock (cfg, bblock, tblock);
6580 ins->inst_target_bb = tblock;
6581 start_new_bblock = 1;
6583 /* skip the CEE_RET, too */
6584 if (ip_in_bb (cfg, bblock, ip + 5))
6594 /* Generic sharing */
6595 /* FIXME: only do this for generic methods if
6596 they are not shared! */
6597 if (context_used && !imt_arg && !array_rank &&
6598 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6599 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6600 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6601 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6604 g_assert (cfg->generic_sharing_context && cmethod);
6608 * We are compiling a call to a
6609 * generic method from shared code,
6610 * which means that we have to look up
6611 * the method in the rgctx and do an
6614 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6617 /* Indirect calls */
6619 g_assert (!imt_arg);
6621 if (*ip == CEE_CALL)
6622 g_assert (context_used);
6623 else if (*ip == CEE_CALLI)
6624 g_assert (!vtable_arg);
6626 /* FIXME: what the hell is this??? */
6627 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6628 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6630 /* Prevent inlining of methods with indirect calls */
6635 int rgctx_reg = mono_alloc_preg (cfg);
6637 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6638 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6639 call = (MonoCallInst*)ins;
6640 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6642 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6644 * Instead of emitting an indirect call, emit a direct call
6645 * with the contents of the aotconst as the patch info.
6647 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6649 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6650 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6653 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6656 if (!MONO_TYPE_IS_VOID (fsig->ret))
6657 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6659 CHECK_CFG_EXCEPTION;
6670 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6671 if (sp [fsig->param_count]->type == STACK_OBJ) {
6672 MonoInst *iargs [2];
6675 iargs [1] = sp [fsig->param_count];
6677 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6680 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6681 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6682 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6683 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6685 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6688 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6689 if (!cmethod->klass->element_class->valuetype && !readonly)
6690 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6691 CHECK_TYPELOAD (cmethod->klass);
6694 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6697 g_assert_not_reached ();
6700 CHECK_CFG_EXCEPTION;
6707 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6709 if (!MONO_TYPE_IS_VOID (fsig->ret))
6710 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6712 CHECK_CFG_EXCEPTION;
6722 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6724 } else if (imt_arg) {
6725 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6727 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6730 if (!MONO_TYPE_IS_VOID (fsig->ret))
6731 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6733 CHECK_CFG_EXCEPTION;
6740 if (cfg->method != method) {
6741 /* return from inlined method */
6743 * If in_count == 0, that means the ret is unreachable due to
6744 * being preceeded by a throw. In that case, inline_method () will
6745 * handle setting the return value
6746 * (test case: test_0_inline_throw ()).
6748 if (return_var && cfg->cbb->in_count) {
6752 //g_assert (returnvar != -1);
6753 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6754 cfg->ret_var_set = TRUE;
6758 MonoType *ret_type = mono_method_signature (method)->ret;
6762 * Place a seq point here too even through the IL stack is not
6763 * empty, so a step over on
6766 * will work correctly.
6768 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6769 MONO_ADD_INS (cfg->cbb, ins);
6772 g_assert (!return_var);
6775 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6778 if (!cfg->vret_addr) {
6781 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6783 EMIT_NEW_RETLOADA (cfg, ret_addr);
6785 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6786 ins->klass = mono_class_from_mono_type (ret_type);
6789 #ifdef MONO_ARCH_SOFT_FLOAT
6790 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6791 MonoInst *iargs [1];
6795 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6796 mono_arch_emit_setret (cfg, method, conv);
6798 mono_arch_emit_setret (cfg, method, *sp);
6801 mono_arch_emit_setret (cfg, method, *sp);
6806 if (sp != stack_start)
6808 MONO_INST_NEW (cfg, ins, OP_BR);
6810 ins->inst_target_bb = end_bblock;
6811 MONO_ADD_INS (bblock, ins);
6812 link_bblock (cfg, bblock, end_bblock);
6813 start_new_bblock = 1;
6817 MONO_INST_NEW (cfg, ins, OP_BR);
6819 target = ip + 1 + (signed char)(*ip);
6821 GET_BBLOCK (cfg, tblock, target);
6822 link_bblock (cfg, bblock, tblock);
6823 ins->inst_target_bb = tblock;
6824 if (sp != stack_start) {
6825 handle_stack_args (cfg, stack_start, sp - stack_start);
6827 CHECK_UNVERIFIABLE (cfg);
6829 MONO_ADD_INS (bblock, ins);
6830 start_new_bblock = 1;
6831 inline_costs += BRANCH_COST;
6845 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6847 target = ip + 1 + *(signed char*)ip;
6853 inline_costs += BRANCH_COST;
6857 MONO_INST_NEW (cfg, ins, OP_BR);
6860 target = ip + 4 + (gint32)read32(ip);
6862 GET_BBLOCK (cfg, tblock, target);
6863 link_bblock (cfg, bblock, tblock);
6864 ins->inst_target_bb = tblock;
6865 if (sp != stack_start) {
6866 handle_stack_args (cfg, stack_start, sp - stack_start);
6868 CHECK_UNVERIFIABLE (cfg);
6871 MONO_ADD_INS (bblock, ins);
6873 start_new_bblock = 1;
6874 inline_costs += BRANCH_COST;
6881 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6882 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6883 guint32 opsize = is_short ? 1 : 4;
6885 CHECK_OPSIZE (opsize);
6887 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6890 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6895 GET_BBLOCK (cfg, tblock, target);
6896 link_bblock (cfg, bblock, tblock);
6897 GET_BBLOCK (cfg, tblock, ip);
6898 link_bblock (cfg, bblock, tblock);
6900 if (sp != stack_start) {
6901 handle_stack_args (cfg, stack_start, sp - stack_start);
6902 CHECK_UNVERIFIABLE (cfg);
6905 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6906 cmp->sreg1 = sp [0]->dreg;
6907 type_from_op (cmp, sp [0], NULL);
6910 #if SIZEOF_REGISTER == 4
6911 if (cmp->opcode == OP_LCOMPARE_IMM) {
6912 /* Convert it to OP_LCOMPARE */
6913 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6914 ins->type = STACK_I8;
6915 ins->dreg = alloc_dreg (cfg, STACK_I8);
6917 MONO_ADD_INS (bblock, ins);
6918 cmp->opcode = OP_LCOMPARE;
6919 cmp->sreg2 = ins->dreg;
6922 MONO_ADD_INS (bblock, cmp);
6924 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6925 type_from_op (ins, sp [0], NULL);
6926 MONO_ADD_INS (bblock, ins);
6927 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6928 GET_BBLOCK (cfg, tblock, target);
6929 ins->inst_true_bb = tblock;
6930 GET_BBLOCK (cfg, tblock, ip);
6931 ins->inst_false_bb = tblock;
6932 start_new_bblock = 2;
6935 inline_costs += BRANCH_COST;
6950 MONO_INST_NEW (cfg, ins, *ip);
6952 target = ip + 4 + (gint32)read32(ip);
6958 inline_costs += BRANCH_COST;
6962 MonoBasicBlock **targets;
6963 MonoBasicBlock *default_bblock;
6964 MonoJumpInfoBBTable *table;
6965 int offset_reg = alloc_preg (cfg);
6966 int target_reg = alloc_preg (cfg);
6967 int table_reg = alloc_preg (cfg);
6968 int sum_reg = alloc_preg (cfg);
6969 gboolean use_op_switch;
6973 n = read32 (ip + 1);
6976 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6980 CHECK_OPSIZE (n * sizeof (guint32));
6981 target = ip + n * sizeof (guint32);
6983 GET_BBLOCK (cfg, default_bblock, target);
6985 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6986 for (i = 0; i < n; ++i) {
6987 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6988 targets [i] = tblock;
6992 if (sp != stack_start) {
6994 * Link the current bb with the targets as well, so handle_stack_args
6995 * will set their in_stack correctly.
6997 link_bblock (cfg, bblock, default_bblock);
6998 for (i = 0; i < n; ++i)
6999 link_bblock (cfg, bblock, targets [i]);
7001 handle_stack_args (cfg, stack_start, sp - stack_start);
7003 CHECK_UNVERIFIABLE (cfg);
7006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7010 for (i = 0; i < n; ++i)
7011 link_bblock (cfg, bblock, targets [i]);
7013 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7014 table->table = targets;
7015 table->table_size = n;
7017 use_op_switch = FALSE;
7019 /* ARM implements SWITCH statements differently */
7020 /* FIXME: Make it use the generic implementation */
7021 if (!cfg->compile_aot)
7022 use_op_switch = TRUE;
7025 if (COMPILE_LLVM (cfg))
7026 use_op_switch = TRUE;
7028 cfg->cbb->has_jump_table = 1;
7030 if (use_op_switch) {
7031 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7032 ins->sreg1 = src1->dreg;
7033 ins->inst_p0 = table;
7034 ins->inst_many_bb = targets;
7035 ins->klass = GUINT_TO_POINTER (n);
7036 MONO_ADD_INS (cfg->cbb, ins);
7038 if (sizeof (gpointer) == 8)
7039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7043 #if SIZEOF_REGISTER == 8
7044 /* The upper word might not be zero, and we add it to a 64 bit address later */
7045 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7048 if (cfg->compile_aot) {
7049 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7051 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7052 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7053 ins->inst_p0 = table;
7054 ins->dreg = table_reg;
7055 MONO_ADD_INS (cfg->cbb, ins);
7058 /* FIXME: Use load_memindex */
7059 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7061 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7063 start_new_bblock = 1;
7064 inline_costs += (BRANCH_COST * 2);
7084 dreg = alloc_freg (cfg);
7087 dreg = alloc_lreg (cfg);
7090 dreg = alloc_preg (cfg);
7093 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7094 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7095 ins->flags |= ins_flag;
7097 MONO_ADD_INS (bblock, ins);
7112 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7113 ins->flags |= ins_flag;
7115 MONO_ADD_INS (bblock, ins);
7117 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7118 MonoInst *dummy_use;
7119 /* insert call to write barrier */
7120 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7121 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7122 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7132 MONO_INST_NEW (cfg, ins, (*ip));
7134 ins->sreg1 = sp [0]->dreg;
7135 ins->sreg2 = sp [1]->dreg;
7136 type_from_op (ins, sp [0], sp [1]);
7138 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7140 /* Use the immediate opcodes if possible */
7141 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7142 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7143 if (imm_opcode != -1) {
7144 ins->opcode = imm_opcode;
7145 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7148 sp [1]->opcode = OP_NOP;
7152 MONO_ADD_INS ((cfg)->cbb, (ins));
7154 *sp++ = mono_decompose_opcode (cfg, ins);
7171 MONO_INST_NEW (cfg, ins, (*ip));
7173 ins->sreg1 = sp [0]->dreg;
7174 ins->sreg2 = sp [1]->dreg;
7175 type_from_op (ins, sp [0], sp [1]);
7177 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7178 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7180 /* FIXME: Pass opcode to is_inst_imm */
7182 /* Use the immediate opcodes if possible */
7183 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7186 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7187 if (imm_opcode != -1) {
7188 ins->opcode = imm_opcode;
7189 if (sp [1]->opcode == OP_I8CONST) {
7190 #if SIZEOF_REGISTER == 8
7191 ins->inst_imm = sp [1]->inst_l;
7193 ins->inst_ls_word = sp [1]->inst_ls_word;
7194 ins->inst_ms_word = sp [1]->inst_ms_word;
7198 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7201 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7202 if (sp [1]->next == NULL)
7203 sp [1]->opcode = OP_NOP;
7206 MONO_ADD_INS ((cfg)->cbb, (ins));
7208 *sp++ = mono_decompose_opcode (cfg, ins);
7221 case CEE_CONV_OVF_I8:
7222 case CEE_CONV_OVF_U8:
7226 /* Special case this earlier so we have long constants in the IR */
7227 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7228 int data = sp [-1]->inst_c0;
7229 sp [-1]->opcode = OP_I8CONST;
7230 sp [-1]->type = STACK_I8;
7231 #if SIZEOF_REGISTER == 8
7232 if ((*ip) == CEE_CONV_U8)
7233 sp [-1]->inst_c0 = (guint32)data;
7235 sp [-1]->inst_c0 = data;
7237 sp [-1]->inst_ls_word = data;
7238 if ((*ip) == CEE_CONV_U8)
7239 sp [-1]->inst_ms_word = 0;
7241 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7243 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7250 case CEE_CONV_OVF_I4:
7251 case CEE_CONV_OVF_I1:
7252 case CEE_CONV_OVF_I2:
7253 case CEE_CONV_OVF_I:
7254 case CEE_CONV_OVF_U:
7257 if (sp [-1]->type == STACK_R8) {
7258 ADD_UNOP (CEE_CONV_OVF_I8);
7265 case CEE_CONV_OVF_U1:
7266 case CEE_CONV_OVF_U2:
7267 case CEE_CONV_OVF_U4:
7270 if (sp [-1]->type == STACK_R8) {
7271 ADD_UNOP (CEE_CONV_OVF_U8);
7278 case CEE_CONV_OVF_I1_UN:
7279 case CEE_CONV_OVF_I2_UN:
7280 case CEE_CONV_OVF_I4_UN:
7281 case CEE_CONV_OVF_I8_UN:
7282 case CEE_CONV_OVF_U1_UN:
7283 case CEE_CONV_OVF_U2_UN:
7284 case CEE_CONV_OVF_U4_UN:
7285 case CEE_CONV_OVF_U8_UN:
7286 case CEE_CONV_OVF_I_UN:
7287 case CEE_CONV_OVF_U_UN:
7294 CHECK_CFG_EXCEPTION;
7298 case CEE_ADD_OVF_UN:
7300 case CEE_MUL_OVF_UN:
7302 case CEE_SUB_OVF_UN:
7310 token = read32 (ip + 1);
7311 klass = mini_get_class (method, token, generic_context);
7312 CHECK_TYPELOAD (klass);
7314 if (generic_class_is_reference_type (cfg, klass)) {
7315 MonoInst *store, *load;
7316 int dreg = alloc_preg (cfg);
7318 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7319 load->flags |= ins_flag;
7320 MONO_ADD_INS (cfg->cbb, load);
7322 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7323 store->flags |= ins_flag;
7324 MONO_ADD_INS (cfg->cbb, store);
7326 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7327 MonoInst *dummy_use;
7328 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7329 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7330 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7333 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7345 token = read32 (ip + 1);
7346 klass = mini_get_class (method, token, generic_context);
7347 CHECK_TYPELOAD (klass);
7349 /* Optimize the common ldobj+stloc combination */
7359 loc_index = ip [5] - CEE_STLOC_0;
7366 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7367 CHECK_LOCAL (loc_index);
7369 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7370 ins->dreg = cfg->locals [loc_index]->dreg;
7376 /* Optimize the ldobj+stobj combination */
7377 /* The reference case ends up being a load+store anyway */
7378 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7383 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7390 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7399 CHECK_STACK_OVF (1);
7401 n = read32 (ip + 1);
7403 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7404 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7405 ins->type = STACK_OBJ;
7408 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7409 MonoInst *iargs [1];
7411 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7412 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7414 if (cfg->opt & MONO_OPT_SHARED) {
7415 MonoInst *iargs [3];
7417 if (cfg->compile_aot) {
7418 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7420 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7421 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7422 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7423 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7424 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7426 if (bblock->out_of_line) {
7427 MonoInst *iargs [2];
7429 if (image == mono_defaults.corlib) {
7431 * Avoid relocations in AOT and save some space by using a
7432 * version of helper_ldstr specialized to mscorlib.
7434 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7435 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7437 /* Avoid creating the string object */
7438 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7439 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7440 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7444 if (cfg->compile_aot) {
7445 NEW_LDSTRCONST (cfg, ins, image, n);
7447 MONO_ADD_INS (bblock, ins);
7450 NEW_PCONST (cfg, ins, NULL);
7451 ins->type = STACK_OBJ;
7452 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7454 MONO_ADD_INS (bblock, ins);
7463 MonoInst *iargs [2];
7464 MonoMethodSignature *fsig;
7467 MonoInst *vtable_arg = NULL;
7470 token = read32 (ip + 1);
7471 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7474 fsig = mono_method_get_signature (cmethod, image, token);
7478 mono_save_token_info (cfg, image, token, cmethod);
7480 if (!mono_class_init (cmethod->klass))
7483 if (cfg->generic_sharing_context)
7484 context_used = mono_method_check_context_used (cmethod);
7486 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7487 if (check_linkdemand (cfg, method, cmethod))
7489 CHECK_CFG_EXCEPTION;
7490 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7491 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7494 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7495 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7496 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7497 mono_class_vtable (cfg->domain, cmethod->klass);
7498 CHECK_TYPELOAD (cmethod->klass);
7500 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7501 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7504 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7505 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7507 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7509 CHECK_TYPELOAD (cmethod->klass);
7510 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7515 n = fsig->param_count;
7519 * Generate smaller code for the common newobj <exception> instruction in
7520 * argument checking code.
7522 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7523 is_exception_class (cmethod->klass) && n <= 2 &&
7524 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7525 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7526 MonoInst *iargs [3];
7528 g_assert (!vtable_arg);
7532 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7535 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7539 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7544 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7547 g_assert_not_reached ();
7555 /* move the args to allow room for 'this' in the first position */
7561 /* check_call_signature () requires sp[0] to be set */
7562 this_ins.type = STACK_OBJ;
7564 if (check_call_signature (cfg, fsig, sp))
7569 if (mini_class_is_system_array (cmethod->klass)) {
7570 g_assert (!vtable_arg);
7572 *sp = emit_get_rgctx_method (cfg, context_used,
7573 cmethod, MONO_RGCTX_INFO_METHOD);
7575 /* Avoid varargs in the common case */
7576 if (fsig->param_count == 1)
7577 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7578 else if (fsig->param_count == 2)
7579 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7580 else if (fsig->param_count == 3)
7581 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7583 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7584 } else if (cmethod->string_ctor) {
7585 g_assert (!context_used);
7586 g_assert (!vtable_arg);
7587 /* we simply pass a null pointer */
7588 EMIT_NEW_PCONST (cfg, *sp, NULL);
7589 /* now call the string ctor */
7590 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7592 MonoInst* callvirt_this_arg = NULL;
7594 if (cmethod->klass->valuetype) {
7595 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7596 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7597 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7602 * The code generated by mini_emit_virtual_call () expects
7603 * iargs [0] to be a boxed instance, but luckily the vcall
7604 * will be transformed into a normal call there.
7606 } else if (context_used) {
7607 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7610 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7612 CHECK_TYPELOAD (cmethod->klass);
7615 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7616 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7617 * As a workaround, we call class cctors before allocating objects.
7619 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7620 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7621 if (cfg->verbose_level > 2)
7622 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7623 class_inits = g_slist_prepend (class_inits, vtable);
7626 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7629 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7632 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7634 /* Now call the actual ctor */
7635 /* Avoid virtual calls to ctors if possible */
7636 if (cmethod->klass->marshalbyref)
7637 callvirt_this_arg = sp [0];
7640 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7641 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7642 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7647 CHECK_CFG_EXCEPTION;
7652 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7653 mono_method_check_inlining (cfg, cmethod) &&
7654 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7655 !g_list_find (dont_inline, cmethod)) {
7658 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7659 cfg->real_offset += 5;
7662 inline_costs += costs - 5;
7665 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7667 } else if (context_used &&
7668 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7669 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7670 MonoInst *cmethod_addr;
7672 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7673 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7675 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7678 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7679 callvirt_this_arg, NULL, vtable_arg);
7683 if (alloc == NULL) {
7685 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7686 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7700 token = read32 (ip + 1);
7701 klass = mini_get_class (method, token, generic_context);
7702 CHECK_TYPELOAD (klass);
7703 if (sp [0]->type != STACK_OBJ)
7706 if (cfg->generic_sharing_context)
7707 context_used = mono_class_check_context_used (klass);
7709 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7716 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7718 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7722 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7723 MonoMethod *mono_castclass;
7724 MonoInst *iargs [1];
7727 mono_castclass = mono_marshal_get_castclass (klass);
7730 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7731 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7732 g_assert (costs > 0);
7735 cfg->real_offset += 5;
7740 inline_costs += costs;
7743 ins = handle_castclass (cfg, klass, *sp, context_used);
7744 CHECK_CFG_EXCEPTION;
7754 token = read32 (ip + 1);
7755 klass = mini_get_class (method, token, generic_context);
7756 CHECK_TYPELOAD (klass);
7757 if (sp [0]->type != STACK_OBJ)
7760 if (cfg->generic_sharing_context)
7761 context_used = mono_class_check_context_used (klass);
7763 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7770 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7772 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7776 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7777 MonoMethod *mono_isinst;
7778 MonoInst *iargs [1];
7781 mono_isinst = mono_marshal_get_isinst (klass);
7784 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7785 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7786 g_assert (costs > 0);
7789 cfg->real_offset += 5;
7794 inline_costs += costs;
7797 ins = handle_isinst (cfg, klass, *sp, context_used);
7798 CHECK_CFG_EXCEPTION;
7805 case CEE_UNBOX_ANY: {
7809 token = read32 (ip + 1);
7810 klass = mini_get_class (method, token, generic_context);
7811 CHECK_TYPELOAD (klass);
7813 mono_save_token_info (cfg, image, token, klass);
7815 if (cfg->generic_sharing_context)
7816 context_used = mono_class_check_context_used (klass);
7818 if (generic_class_is_reference_type (cfg, klass)) {
7819 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7821 MonoInst *iargs [2];
7826 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7827 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7831 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7832 MonoMethod *mono_castclass;
7833 MonoInst *iargs [1];
7836 mono_castclass = mono_marshal_get_castclass (klass);
7839 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7840 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7842 g_assert (costs > 0);
7845 cfg->real_offset += 5;
7849 inline_costs += costs;
7851 ins = handle_castclass (cfg, klass, *sp, 0);
7852 CHECK_CFG_EXCEPTION;
7860 if (mono_class_is_nullable (klass)) {
7861 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7868 ins = handle_unbox (cfg, klass, sp, context_used);
7874 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7887 token = read32 (ip + 1);
7888 klass = mini_get_class (method, token, generic_context);
7889 CHECK_TYPELOAD (klass);
7891 mono_save_token_info (cfg, image, token, klass);
7893 if (cfg->generic_sharing_context)
7894 context_used = mono_class_check_context_used (klass);
7896 if (generic_class_is_reference_type (cfg, klass)) {
7902 if (klass == mono_defaults.void_class)
7904 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7906 /* frequent check in generic code: box (struct), brtrue */
7907 if (!mono_class_is_nullable (klass) &&
7908 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7909 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7911 MONO_INST_NEW (cfg, ins, OP_BR);
7912 if (*ip == CEE_BRTRUE_S) {
7915 target = ip + 1 + (signed char)(*ip);
7920 target = ip + 4 + (gint)(read32 (ip));
7923 GET_BBLOCK (cfg, tblock, target);
7924 link_bblock (cfg, bblock, tblock);
7925 ins->inst_target_bb = tblock;
7926 GET_BBLOCK (cfg, tblock, ip);
7928 * This leads to some inconsistency, since the two bblocks are
7929 * not really connected, but it is needed for handling stack
7930 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7931 * FIXME: This should only be needed if sp != stack_start, but that
7932 * doesn't work for some reason (test failure in mcs/tests on x86).
7934 link_bblock (cfg, bblock, tblock);
7935 if (sp != stack_start) {
7936 handle_stack_args (cfg, stack_start, sp - stack_start);
7938 CHECK_UNVERIFIABLE (cfg);
7940 MONO_ADD_INS (bblock, ins);
7941 start_new_bblock = 1;
7945 *sp++ = handle_box (cfg, val, klass, context_used);
7947 CHECK_CFG_EXCEPTION;
7956 token = read32 (ip + 1);
7957 klass = mini_get_class (method, token, generic_context);
7958 CHECK_TYPELOAD (klass);
7960 mono_save_token_info (cfg, image, token, klass);
7962 if (cfg->generic_sharing_context)
7963 context_used = mono_class_check_context_used (klass);
7965 if (mono_class_is_nullable (klass)) {
7968 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7969 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7973 ins = handle_unbox (cfg, klass, sp, context_used);
7983 MonoClassField *field;
7987 if (*ip == CEE_STFLD) {
7994 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7996 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7999 token = read32 (ip + 1);
8000 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8001 field = mono_method_get_wrapper_data (method, token);
8002 klass = field->parent;
8005 field = mono_field_from_token (image, token, &klass, generic_context);
8009 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8010 FIELD_ACCESS_FAILURE;
8011 mono_class_init (klass);
8013 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8014 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8015 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8016 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8019 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8020 if (*ip == CEE_STFLD) {
8021 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8023 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8024 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8025 MonoInst *iargs [5];
8028 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8029 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8030 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8034 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8035 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8036 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8037 g_assert (costs > 0);
8039 cfg->real_offset += 5;
8042 inline_costs += costs;
8044 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8049 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8051 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8052 if (sp [0]->opcode != OP_LDADDR)
8053 store->flags |= MONO_INST_FAULT;
8055 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8056 /* insert call to write barrier */
8057 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8058 MonoInst *iargs [2], *dummy_use;
8061 dreg = alloc_preg (cfg);
8062 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8064 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8066 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8069 store->flags |= ins_flag;
8076 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8077 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8078 MonoInst *iargs [4];
8081 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8082 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8083 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8084 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8085 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8086 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8088 g_assert (costs > 0);
8090 cfg->real_offset += 5;
8094 inline_costs += costs;
8096 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8100 if (sp [0]->type == STACK_VTYPE) {
8103 /* Have to compute the address of the variable */
8105 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8107 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8109 g_assert (var->klass == klass);
8111 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8115 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8117 if (*ip == CEE_LDFLDA) {
8118 dreg = alloc_preg (cfg);
8120 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8121 ins->klass = mono_class_from_mono_type (field->type);
8122 ins->type = STACK_MP;
8127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8128 load->flags |= ins_flag;
8129 if (sp [0]->opcode != OP_LDADDR)
8130 load->flags |= MONO_INST_FAULT;
8141 MonoClassField *field;
8142 gpointer addr = NULL;
8143 gboolean is_special_static;
8146 token = read32 (ip + 1);
8148 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8149 field = mono_method_get_wrapper_data (method, token);
8150 klass = field->parent;
8153 field = mono_field_from_token (image, token, &klass, generic_context);
8156 mono_class_init (klass);
8157 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8158 FIELD_ACCESS_FAILURE;
8160 /* if the class is Critical then transparent code cannot access it's fields */
8161 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8162 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8165 * We can only support shared generic static
8166 * field access on architectures where the
8167 * trampoline code has been extended to handle
8168 * the generic class init.
8170 #ifndef MONO_ARCH_VTABLE_REG
8171 GENERIC_SHARING_FAILURE (*ip);
8174 if (cfg->generic_sharing_context)
8175 context_used = mono_class_check_context_used (klass);
8177 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8179 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8180 * to be called here.
8182 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8183 mono_class_vtable (cfg->domain, klass);
8184 CHECK_TYPELOAD (klass);
8186 mono_domain_lock (cfg->domain);
8187 if (cfg->domain->special_static_fields)
8188 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8189 mono_domain_unlock (cfg->domain);
8191 is_special_static = mono_class_field_is_special_static (field);
8193 /* Generate IR to compute the field address */
8194 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8196 * Fast access to TLS data
8197 * Inline version of get_thread_static_data () in
8201 int idx, static_data_reg, array_reg, dreg;
8202 MonoInst *thread_ins;
8204 // offset &= 0x7fffffff;
8205 // idx = (offset >> 24) - 1;
8206 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8208 thread_ins = mono_get_thread_intrinsic (cfg);
8209 MONO_ADD_INS (cfg->cbb, thread_ins);
8210 static_data_reg = alloc_ireg (cfg);
8211 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8213 if (cfg->compile_aot) {
8214 int offset_reg, offset2_reg, idx_reg;
8216 /* For TLS variables, this will return the TLS offset */
8217 EMIT_NEW_SFLDACONST (cfg, ins, field);
8218 offset_reg = ins->dreg;
8219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8220 idx_reg = alloc_ireg (cfg);
8221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8224 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8225 array_reg = alloc_ireg (cfg);
8226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8227 offset2_reg = alloc_ireg (cfg);
8228 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8229 dreg = alloc_ireg (cfg);
8230 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8232 offset = (gsize)addr & 0x7fffffff;
8233 idx = (offset >> 24) - 1;
8235 array_reg = alloc_ireg (cfg);
8236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8237 dreg = alloc_ireg (cfg);
8238 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8240 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8241 (cfg->compile_aot && is_special_static) ||
8242 (context_used && is_special_static)) {
8243 MonoInst *iargs [2];
8245 g_assert (field->parent);
8246 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8248 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8249 field, MONO_RGCTX_INFO_CLASS_FIELD);
8251 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8253 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8254 } else if (context_used) {
8255 MonoInst *static_data;
8258 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8259 method->klass->name_space, method->klass->name, method->name,
8260 depth, field->offset);
8263 if (mono_class_needs_cctor_run (klass, method))
8264 emit_generic_class_init (cfg, klass);
8267 * The pointer we're computing here is
8269 * super_info.static_data + field->offset
8271 static_data = emit_get_rgctx_klass (cfg, context_used,
8272 klass, MONO_RGCTX_INFO_STATIC_DATA);
8274 if (field->offset == 0) {
8277 int addr_reg = mono_alloc_preg (cfg);
8278 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8280 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8281 MonoInst *iargs [2];
8283 g_assert (field->parent);
8284 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8285 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8286 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8288 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8290 CHECK_TYPELOAD (klass);
8292 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8293 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8294 if (cfg->verbose_level > 2)
8295 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8296 class_inits = g_slist_prepend (class_inits, vtable);
8298 if (cfg->run_cctors) {
8300 /* This makes it so that inlining cannot trigger */
8301 /* .cctors: too many apps depend on them */
8302 /* running with a specific order... */
8303 if (! vtable->initialized)
8305 ex = mono_runtime_class_init_full (vtable, FALSE);
8307 set_exception_object (cfg, ex);
8308 goto exception_exit;
8312 addr = (char*)vtable->data + field->offset;
8314 if (cfg->compile_aot)
8315 EMIT_NEW_SFLDACONST (cfg, ins, field);
8317 EMIT_NEW_PCONST (cfg, ins, addr);
8319 MonoInst *iargs [1];
8320 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8321 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8325 /* Generate IR to do the actual load/store operation */
8327 if (*ip == CEE_LDSFLDA) {
8328 ins->klass = mono_class_from_mono_type (field->type);
8329 ins->type = STACK_PTR;
8331 } else if (*ip == CEE_STSFLD) {
8336 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8337 store->flags |= ins_flag;
8339 gboolean is_const = FALSE;
8340 MonoVTable *vtable = NULL;
8342 if (!context_used) {
8343 vtable = mono_class_vtable (cfg->domain, klass);
8344 CHECK_TYPELOAD (klass);
8346 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8347 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8348 gpointer addr = (char*)vtable->data + field->offset;
8349 int ro_type = field->type->type;
8350 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8351 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8353 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8356 case MONO_TYPE_BOOLEAN:
8358 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8362 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8365 case MONO_TYPE_CHAR:
8367 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8371 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8376 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8380 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8386 case MONO_TYPE_FNPTR:
8387 #ifndef HAVE_MOVING_COLLECTOR
8388 case MONO_TYPE_STRING:
8389 case MONO_TYPE_OBJECT:
8390 case MONO_TYPE_CLASS:
8391 case MONO_TYPE_SZARRAY:
8392 case MONO_TYPE_ARRAY:
8394 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8395 type_to_eval_stack_type ((cfg), field->type, *sp);
8400 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8405 case MONO_TYPE_VALUETYPE:
8415 CHECK_STACK_OVF (1);
8417 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8418 load->flags |= ins_flag;
8431 token = read32 (ip + 1);
8432 klass = mini_get_class (method, token, generic_context);
8433 CHECK_TYPELOAD (klass);
8434 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8435 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8436 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8437 generic_class_is_reference_type (cfg, klass)) {
8438 MonoInst *dummy_use;
8439 /* insert call to write barrier */
8440 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8441 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8442 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8454 const char *data_ptr;
8456 guint32 field_token;
8462 token = read32 (ip + 1);
8464 klass = mini_get_class (method, token, generic_context);
8465 CHECK_TYPELOAD (klass);
8467 if (cfg->generic_sharing_context)
8468 context_used = mono_class_check_context_used (klass);
8470 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8471 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8472 ins->sreg1 = sp [0]->dreg;
8473 ins->type = STACK_I4;
8474 ins->dreg = alloc_ireg (cfg);
8475 MONO_ADD_INS (cfg->cbb, ins);
8476 *sp = mono_decompose_opcode (cfg, ins);
8481 MonoClass *array_class = mono_array_class_get (klass, 1);
8482 /* FIXME: we cannot get a managed
8483 allocator because we can't get the
8484 open generic class's vtable. We
8485 have the same problem in
8486 handle_alloc(). This
8487 needs to be solved so that we can
8488 have managed allocs of shared
8491 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8492 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8494 MonoMethod *managed_alloc = NULL;
8496 /* FIXME: Decompose later to help abcrem */
8499 args [0] = emit_get_rgctx_klass (cfg, context_used,
8500 array_class, MONO_RGCTX_INFO_VTABLE);
8505 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8507 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8509 if (cfg->opt & MONO_OPT_SHARED) {
8510 /* Decompose now to avoid problems with references to the domainvar */
8511 MonoInst *iargs [3];
8513 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8514 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8517 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8519 /* Decompose later since it is needed by abcrem */
8520 MonoClass *array_type = mono_array_class_get (klass, 1);
8521 mono_class_vtable (cfg->domain, array_type);
8522 CHECK_TYPELOAD (array_type);
8524 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8525 ins->dreg = alloc_preg (cfg);
8526 ins->sreg1 = sp [0]->dreg;
8527 ins->inst_newa_class = klass;
8528 ins->type = STACK_OBJ;
8530 MONO_ADD_INS (cfg->cbb, ins);
8531 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8532 cfg->cbb->has_array_access = TRUE;
8534 /* Needed so mono_emit_load_get_addr () gets called */
8535 mono_get_got_var (cfg);
8545 * we inline/optimize the initialization sequence if possible.
8546 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8547 * for small sizes open code the memcpy
8548 * ensure the rva field is big enough
8550 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8551 MonoMethod *memcpy_method = get_memcpy_method ();
8552 MonoInst *iargs [3];
8553 int add_reg = alloc_preg (cfg);
8555 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8556 if (cfg->compile_aot) {
8557 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8559 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8561 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8562 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8571 if (sp [0]->type != STACK_OBJ)
8574 dreg = alloc_preg (cfg);
8575 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8576 ins->dreg = alloc_preg (cfg);
8577 ins->sreg1 = sp [0]->dreg;
8578 ins->type = STACK_I4;
8579 MONO_ADD_INS (cfg->cbb, ins);
8580 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8581 cfg->cbb->has_array_access = TRUE;
8589 if (sp [0]->type != STACK_OBJ)
8592 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8594 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8595 CHECK_TYPELOAD (klass);
8596 /* we need to make sure that this array is exactly the type it needs
8597 * to be for correctness. the wrappers are lax with their usage
8598 * so we need to ignore them here
8600 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8601 MonoClass *array_class = mono_array_class_get (klass, 1);
8602 mini_emit_check_array_type (cfg, sp [0], array_class);
8603 CHECK_TYPELOAD (array_class);
8607 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8622 case CEE_LDELEM_REF: {
8628 if (*ip == CEE_LDELEM) {
8630 token = read32 (ip + 1);
8631 klass = mini_get_class (method, token, generic_context);
8632 CHECK_TYPELOAD (klass);
8633 mono_class_init (klass);
8636 klass = array_access_to_klass (*ip);
8638 if (sp [0]->type != STACK_OBJ)
8641 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8643 if (sp [1]->opcode == OP_ICONST) {
8644 int array_reg = sp [0]->dreg;
8645 int index_reg = sp [1]->dreg;
8646 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8648 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8651 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8655 if (*ip == CEE_LDELEM)
8668 case CEE_STELEM_REF:
8675 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8677 if (*ip == CEE_STELEM) {
8679 token = read32 (ip + 1);
8680 klass = mini_get_class (method, token, generic_context);
8681 CHECK_TYPELOAD (klass);
8682 mono_class_init (klass);
8685 klass = array_access_to_klass (*ip);
8687 if (sp [0]->type != STACK_OBJ)
8690 /* storing a NULL doesn't need any of the complex checks in stelemref */
8691 if (generic_class_is_reference_type (cfg, klass) &&
8692 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8693 MonoMethod* helper = mono_marshal_get_stelemref ();
8694 MonoInst *iargs [3];
8696 if (sp [0]->type != STACK_OBJ)
8698 if (sp [2]->type != STACK_OBJ)
8705 mono_emit_method_call (cfg, helper, iargs, NULL);
8707 if (sp [1]->opcode == OP_ICONST) {
8708 int array_reg = sp [0]->dreg;
8709 int index_reg = sp [1]->dreg;
8710 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8712 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8715 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8720 if (*ip == CEE_STELEM)
8727 case CEE_CKFINITE: {
8731 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8732 ins->sreg1 = sp [0]->dreg;
8733 ins->dreg = alloc_freg (cfg);
8734 ins->type = STACK_R8;
8735 MONO_ADD_INS (bblock, ins);
8737 *sp++ = mono_decompose_opcode (cfg, ins);
8742 case CEE_REFANYVAL: {
8743 MonoInst *src_var, *src;
8745 int klass_reg = alloc_preg (cfg);
8746 int dreg = alloc_preg (cfg);
8749 MONO_INST_NEW (cfg, ins, *ip);
8752 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8753 CHECK_TYPELOAD (klass);
8754 mono_class_init (klass);
8756 if (cfg->generic_sharing_context)
8757 context_used = mono_class_check_context_used (klass);
8760 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8762 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8763 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8767 MonoInst *klass_ins;
8769 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8770 klass, MONO_RGCTX_INFO_KLASS);
8773 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8776 mini_emit_class_check (cfg, klass_reg, klass);
8778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8779 ins->type = STACK_MP;
8784 case CEE_MKREFANY: {
8785 MonoInst *loc, *addr;
8788 MONO_INST_NEW (cfg, ins, *ip);
8791 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8792 CHECK_TYPELOAD (klass);
8793 mono_class_init (klass);
8795 if (cfg->generic_sharing_context)
8796 context_used = mono_class_check_context_used (klass);
8798 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8799 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8802 MonoInst *const_ins;
8803 int type_reg = alloc_preg (cfg);
8805 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8809 } else if (cfg->compile_aot) {
8810 int const_reg = alloc_preg (cfg);
8811 int type_reg = alloc_preg (cfg);
8813 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8818 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8823 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8824 ins->type = STACK_VTYPE;
8825 ins->klass = mono_defaults.typed_reference_class;
8832 MonoClass *handle_class;
8834 CHECK_STACK_OVF (1);
8837 n = read32 (ip + 1);
8839 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8840 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8841 handle = mono_method_get_wrapper_data (method, n);
8842 handle_class = mono_method_get_wrapper_data (method, n + 1);
8843 if (handle_class == mono_defaults.typehandle_class)
8844 handle = &((MonoClass*)handle)->byval_arg;
8847 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8851 mono_class_init (handle_class);
8852 if (cfg->generic_sharing_context) {
8853 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8854 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8855 /* This case handles ldtoken
8856 of an open type, like for
8859 } else if (handle_class == mono_defaults.typehandle_class) {
8860 /* If we get a MONO_TYPE_CLASS
8861 then we need to provide the
8863 instantiation of it. */
8864 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8867 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8868 } else if (handle_class == mono_defaults.fieldhandle_class)
8869 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8870 else if (handle_class == mono_defaults.methodhandle_class)
8871 context_used = mono_method_check_context_used (handle);
8873 g_assert_not_reached ();
8876 if ((cfg->opt & MONO_OPT_SHARED) &&
8877 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8878 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8879 MonoInst *addr, *vtvar, *iargs [3];
8880 int method_context_used;
8882 if (cfg->generic_sharing_context)
8883 method_context_used = mono_method_check_context_used (method);
8885 method_context_used = 0;
8887 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8889 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8890 EMIT_NEW_ICONST (cfg, iargs [1], n);
8891 if (method_context_used) {
8892 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8893 method, MONO_RGCTX_INFO_METHOD);
8894 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8896 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8897 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8899 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8903 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8905 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8906 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8907 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8908 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8909 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8910 MonoClass *tclass = mono_class_from_mono_type (handle);
8912 mono_class_init (tclass);
8914 ins = emit_get_rgctx_klass (cfg, context_used,
8915 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8916 } else if (cfg->compile_aot) {
8917 if (method->wrapper_type) {
8918 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8919 /* Special case for static synchronized wrappers */
8920 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8922 /* FIXME: n is not a normal token */
8923 cfg->disable_aot = TRUE;
8924 EMIT_NEW_PCONST (cfg, ins, NULL);
8927 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8930 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8932 ins->type = STACK_OBJ;
8933 ins->klass = cmethod->klass;
8936 MonoInst *addr, *vtvar;
8938 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8941 if (handle_class == mono_defaults.typehandle_class) {
8942 ins = emit_get_rgctx_klass (cfg, context_used,
8943 mono_class_from_mono_type (handle),
8944 MONO_RGCTX_INFO_TYPE);
8945 } else if (handle_class == mono_defaults.methodhandle_class) {
8946 ins = emit_get_rgctx_method (cfg, context_used,
8947 handle, MONO_RGCTX_INFO_METHOD);
8948 } else if (handle_class == mono_defaults.fieldhandle_class) {
8949 ins = emit_get_rgctx_field (cfg, context_used,
8950 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8952 g_assert_not_reached ();
8954 } else if (cfg->compile_aot) {
8955 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8957 EMIT_NEW_PCONST (cfg, ins, handle);
8959 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8961 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8971 MONO_INST_NEW (cfg, ins, OP_THROW);
8973 ins->sreg1 = sp [0]->dreg;
8975 bblock->out_of_line = TRUE;
8976 MONO_ADD_INS (bblock, ins);
8977 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8978 MONO_ADD_INS (bblock, ins);
8981 link_bblock (cfg, bblock, end_bblock);
8982 start_new_bblock = 1;
8984 case CEE_ENDFINALLY:
8985 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8986 MONO_ADD_INS (bblock, ins);
8988 start_new_bblock = 1;
8991 * Control will leave the method so empty the stack, otherwise
8992 * the next basic block will start with a nonempty stack.
8994 while (sp != stack_start) {
9002 if (*ip == CEE_LEAVE) {
9004 target = ip + 5 + (gint32)read32(ip + 1);
9007 target = ip + 2 + (signed char)(ip [1]);
9010 /* empty the stack */
9011 while (sp != stack_start) {
9016 * If this leave statement is in a catch block, check for a
9017 * pending exception, and rethrow it if necessary.
9018 * We avoid doing this in runtime invoke wrappers, since those are called
9019 * by native code which expects the wrapper to catch all exceptions.
9021 for (i = 0; i < header->num_clauses; ++i) {
9022 MonoExceptionClause *clause = &header->clauses [i];
9025 * Use <= in the final comparison to handle clauses with multiple
9026 * leave statements, like in bug #78024.
9027 * The ordering of the exception clauses guarantees that we find the
9030 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9032 MonoBasicBlock *dont_throw;
9037 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9040 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9042 NEW_BBLOCK (cfg, dont_throw);
9045 * Currently, we always rethrow the abort exception, despite the
9046 * fact that this is not correct. See thread6.cs for an example.
9047 * But propagating the abort exception is more important than
9048 * getting the semantics right.
9050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9052 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9054 MONO_START_BB (cfg, dont_throw);
9059 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9061 MonoExceptionClause *clause;
9063 for (tmp = handlers; tmp; tmp = tmp->next) {
9065 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9067 link_bblock (cfg, bblock, tblock);
9068 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9069 ins->inst_target_bb = tblock;
9070 ins->inst_eh_block = clause;
9071 MONO_ADD_INS (bblock, ins);
9072 bblock->has_call_handler = 1;
9073 if (COMPILE_LLVM (cfg)) {
9074 MonoBasicBlock *target_bb;
9077 * Link the finally bblock with the target, since it will
9078 * conceptually branch there.
9079 * FIXME: Have to link the bblock containing the endfinally.
9081 GET_BBLOCK (cfg, target_bb, target);
9082 link_bblock (cfg, tblock, target_bb);
9085 g_list_free (handlers);
9088 MONO_INST_NEW (cfg, ins, OP_BR);
9089 MONO_ADD_INS (bblock, ins);
9090 GET_BBLOCK (cfg, tblock, target);
9091 link_bblock (cfg, bblock, tblock);
9092 ins->inst_target_bb = tblock;
9093 start_new_bblock = 1;
9095 if (*ip == CEE_LEAVE)
9104 * Mono specific opcodes
9106 case MONO_CUSTOM_PREFIX: {
9108 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9112 case CEE_MONO_ICALL: {
9114 MonoJitICallInfo *info;
9116 token = read32 (ip + 2);
9117 func = mono_method_get_wrapper_data (method, token);
9118 info = mono_find_jit_icall_by_addr (func);
9121 CHECK_STACK (info->sig->param_count);
9122 sp -= info->sig->param_count;
9124 ins = mono_emit_jit_icall (cfg, info->func, sp);
9125 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9129 inline_costs += 10 * num_calls++;
9133 case CEE_MONO_LDPTR: {
9136 CHECK_STACK_OVF (1);
9138 token = read32 (ip + 2);
9140 ptr = mono_method_get_wrapper_data (method, token);
9141 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9142 MonoJitICallInfo *callinfo;
9143 const char *icall_name;
9145 icall_name = method->name + strlen ("__icall_wrapper_");
9146 g_assert (icall_name);
9147 callinfo = mono_find_jit_icall_by_name (icall_name);
9148 g_assert (callinfo);
9150 if (ptr == callinfo->func) {
9151 /* Will be transformed into an AOTCONST later */
9152 EMIT_NEW_PCONST (cfg, ins, ptr);
9158 /* FIXME: Generalize this */
9159 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9160 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9165 EMIT_NEW_PCONST (cfg, ins, ptr);
9168 inline_costs += 10 * num_calls++;
9169 /* Can't embed random pointers into AOT code */
9170 cfg->disable_aot = 1;
9173 case CEE_MONO_ICALL_ADDR: {
9174 MonoMethod *cmethod;
9177 CHECK_STACK_OVF (1);
9179 token = read32 (ip + 2);
9181 cmethod = mono_method_get_wrapper_data (method, token);
9183 if (cfg->compile_aot) {
9184 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9186 ptr = mono_lookup_internal_call (cmethod);
9188 EMIT_NEW_PCONST (cfg, ins, ptr);
9194 case CEE_MONO_VTADDR: {
9195 MonoInst *src_var, *src;
9201 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9202 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9207 case CEE_MONO_NEWOBJ: {
9208 MonoInst *iargs [2];
9210 CHECK_STACK_OVF (1);
9212 token = read32 (ip + 2);
9213 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9214 mono_class_init (klass);
9215 NEW_DOMAINCONST (cfg, iargs [0]);
9216 MONO_ADD_INS (cfg->cbb, iargs [0]);
9217 NEW_CLASSCONST (cfg, iargs [1], klass);
9218 MONO_ADD_INS (cfg->cbb, iargs [1]);
9219 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9221 inline_costs += 10 * num_calls++;
9224 case CEE_MONO_OBJADDR:
9227 MONO_INST_NEW (cfg, ins, OP_MOVE);
9228 ins->dreg = alloc_preg (cfg);
9229 ins->sreg1 = sp [0]->dreg;
9230 ins->type = STACK_MP;
9231 MONO_ADD_INS (cfg->cbb, ins);
9235 case CEE_MONO_LDNATIVEOBJ:
9237 * Similar to LDOBJ, but instead load the unmanaged
9238 * representation of the vtype to the stack.
9243 token = read32 (ip + 2);
9244 klass = mono_method_get_wrapper_data (method, token);
9245 g_assert (klass->valuetype);
9246 mono_class_init (klass);
9249 MonoInst *src, *dest, *temp;
9252 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9253 temp->backend.is_pinvoke = 1;
9254 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9255 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9257 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9258 dest->type = STACK_VTYPE;
9259 dest->klass = klass;
9265 case CEE_MONO_RETOBJ: {
9267 * Same as RET, but return the native representation of a vtype
9270 g_assert (cfg->ret);
9271 g_assert (mono_method_signature (method)->pinvoke);
9276 token = read32 (ip + 2);
9277 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9279 if (!cfg->vret_addr) {
9280 g_assert (cfg->ret_var_is_local);
9282 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9284 EMIT_NEW_RETLOADA (cfg, ins);
9286 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9288 if (sp != stack_start)
9291 MONO_INST_NEW (cfg, ins, OP_BR);
9292 ins->inst_target_bb = end_bblock;
9293 MONO_ADD_INS (bblock, ins);
9294 link_bblock (cfg, bblock, end_bblock);
9295 start_new_bblock = 1;
9299 case CEE_MONO_CISINST:
9300 case CEE_MONO_CCASTCLASS: {
9305 token = read32 (ip + 2);
9306 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9307 if (ip [1] == CEE_MONO_CISINST)
9308 ins = handle_cisinst (cfg, klass, sp [0]);
9310 ins = handle_ccastclass (cfg, klass, sp [0]);
9316 case CEE_MONO_SAVE_LMF:
9317 case CEE_MONO_RESTORE_LMF:
9318 #ifdef MONO_ARCH_HAVE_LMF_OPS
9319 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9320 MONO_ADD_INS (bblock, ins);
9321 cfg->need_lmf_area = TRUE;
9325 case CEE_MONO_CLASSCONST:
9326 CHECK_STACK_OVF (1);
9328 token = read32 (ip + 2);
9329 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9332 inline_costs += 10 * num_calls++;
9334 case CEE_MONO_NOT_TAKEN:
9335 bblock->out_of_line = TRUE;
9339 CHECK_STACK_OVF (1);
9341 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9342 ins->dreg = alloc_preg (cfg);
9343 ins->inst_offset = (gint32)read32 (ip + 2);
9344 ins->type = STACK_PTR;
9345 MONO_ADD_INS (bblock, ins);
9349 case CEE_MONO_DYN_CALL: {
9352 /* It would be easier to call a trampoline, but that would put an
9353 * extra frame on the stack, confusing exception handling. So
9354 * implement it inline using an opcode for now.
9357 if (!cfg->dyn_call_var) {
9358 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9359 /* prevent it from being register allocated */
9360 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9363 /* Has to use a call inst since it local regalloc expects it */
9364 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9365 ins = (MonoInst*)call;
9367 ins->sreg1 = sp [0]->dreg;
9368 ins->sreg2 = sp [1]->dreg;
9369 MONO_ADD_INS (bblock, ins);
9371 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9372 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9376 inline_costs += 10 * num_calls++;
9381 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9391 /* somewhat similar to LDTOKEN */
9392 MonoInst *addr, *vtvar;
9393 CHECK_STACK_OVF (1);
9394 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9396 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9397 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9399 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9400 ins->type = STACK_VTYPE;
9401 ins->klass = mono_defaults.argumenthandle_class;
9414 * The following transforms:
9415 * CEE_CEQ into OP_CEQ
9416 * CEE_CGT into OP_CGT
9417 * CEE_CGT_UN into OP_CGT_UN
9418 * CEE_CLT into OP_CLT
9419 * CEE_CLT_UN into OP_CLT_UN
9421 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9423 MONO_INST_NEW (cfg, ins, cmp->opcode);
9425 cmp->sreg1 = sp [0]->dreg;
9426 cmp->sreg2 = sp [1]->dreg;
9427 type_from_op (cmp, sp [0], sp [1]);
9429 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9430 cmp->opcode = OP_LCOMPARE;
9431 else if (sp [0]->type == STACK_R8)
9432 cmp->opcode = OP_FCOMPARE;
9434 cmp->opcode = OP_ICOMPARE;
9435 MONO_ADD_INS (bblock, cmp);
9436 ins->type = STACK_I4;
9437 ins->dreg = alloc_dreg (cfg, ins->type);
9438 type_from_op (ins, sp [0], sp [1]);
9440 if (cmp->opcode == OP_FCOMPARE) {
9442 * The backends expect the fceq opcodes to do the
9445 cmp->opcode = OP_NOP;
9446 ins->sreg1 = cmp->sreg1;
9447 ins->sreg2 = cmp->sreg2;
9449 MONO_ADD_INS (bblock, ins);
9456 MonoMethod *cil_method;
9457 gboolean needs_static_rgctx_invoke;
9459 CHECK_STACK_OVF (1);
9461 n = read32 (ip + 2);
9462 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9465 mono_class_init (cmethod->klass);
9467 mono_save_token_info (cfg, image, n, cmethod);
9469 if (cfg->generic_sharing_context)
9470 context_used = mono_method_check_context_used (cmethod);
9472 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9474 cil_method = cmethod;
9475 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9476 METHOD_ACCESS_FAILURE;
9478 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9479 if (check_linkdemand (cfg, method, cmethod))
9481 CHECK_CFG_EXCEPTION;
9482 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9483 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9487 * Optimize the common case of ldftn+delegate creation
9489 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9490 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9491 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9493 int invoke_context_used = 0;
9495 invoke = mono_get_delegate_invoke (ctor_method->klass);
9496 if (!invoke || !mono_method_signature (invoke))
9499 if (cfg->generic_sharing_context)
9500 invoke_context_used = mono_method_check_context_used (invoke);
9502 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9503 /* FIXME: SGEN support */
9504 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9505 MonoInst *target_ins;
9508 if (cfg->verbose_level > 3)
9509 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9510 target_ins = sp [-1];
9512 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9513 CHECK_CFG_EXCEPTION;
9522 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9523 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9527 inline_costs += 10 * num_calls++;
9530 case CEE_LDVIRTFTN: {
9535 n = read32 (ip + 2);
9536 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9539 mono_class_init (cmethod->klass);
9541 if (cfg->generic_sharing_context)
9542 context_used = mono_method_check_context_used (cmethod);
9544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9545 if (check_linkdemand (cfg, method, cmethod))
9547 CHECK_CFG_EXCEPTION;
9548 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9549 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9555 args [1] = emit_get_rgctx_method (cfg, context_used,
9556 cmethod, MONO_RGCTX_INFO_METHOD);
9559 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9561 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9564 inline_costs += 10 * num_calls++;
9568 CHECK_STACK_OVF (1);
9570 n = read16 (ip + 2);
9572 EMIT_NEW_ARGLOAD (cfg, ins, n);
9577 CHECK_STACK_OVF (1);
9579 n = read16 (ip + 2);
9581 NEW_ARGLOADA (cfg, ins, n);
9582 MONO_ADD_INS (cfg->cbb, ins);
9590 n = read16 (ip + 2);
9592 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9594 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9598 CHECK_STACK_OVF (1);
9600 n = read16 (ip + 2);
9602 EMIT_NEW_LOCLOAD (cfg, ins, n);
9607 unsigned char *tmp_ip;
9608 CHECK_STACK_OVF (1);
9610 n = read16 (ip + 2);
9613 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9619 EMIT_NEW_LOCLOADA (cfg, ins, n);
9628 n = read16 (ip + 2);
9630 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9632 emit_stloc_ir (cfg, sp, header, n);
9639 if (sp != stack_start)
9641 if (cfg->method != method)
9643 * Inlining this into a loop in a parent could lead to
9644 * stack overflows which is different behavior than the
9645 * non-inlined case, thus disable inlining in this case.
9647 goto inline_failure;
9649 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9650 ins->dreg = alloc_preg (cfg);
9651 ins->sreg1 = sp [0]->dreg;
9652 ins->type = STACK_PTR;
9653 MONO_ADD_INS (cfg->cbb, ins);
9655 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9657 ins->flags |= MONO_INST_INIT;
9662 case CEE_ENDFILTER: {
9663 MonoExceptionClause *clause, *nearest;
9664 int cc, nearest_num;
9668 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9670 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9671 ins->sreg1 = (*sp)->dreg;
9672 MONO_ADD_INS (bblock, ins);
9673 start_new_bblock = 1;
9678 for (cc = 0; cc < header->num_clauses; ++cc) {
9679 clause = &header->clauses [cc];
9680 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9681 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9682 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9688 if ((ip - header->code) != nearest->handler_offset)
9693 case CEE_UNALIGNED_:
9694 ins_flag |= MONO_INST_UNALIGNED;
9695 /* FIXME: record alignment? we can assume 1 for now */
9700 ins_flag |= MONO_INST_VOLATILE;
9704 ins_flag |= MONO_INST_TAILCALL;
9705 cfg->flags |= MONO_CFG_HAS_TAIL;
9706 /* Can't inline tail calls at this time */
9707 inline_costs += 100000;
9714 token = read32 (ip + 2);
9715 klass = mini_get_class (method, token, generic_context);
9716 CHECK_TYPELOAD (klass);
9717 if (generic_class_is_reference_type (cfg, klass))
9718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9720 mini_emit_initobj (cfg, *sp, NULL, klass);
9724 case CEE_CONSTRAINED_:
9726 token = read32 (ip + 2);
9727 if (method->wrapper_type != MONO_WRAPPER_NONE)
9728 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9730 constrained_call = mono_class_get_full (image, token, generic_context);
9731 CHECK_TYPELOAD (constrained_call);
9736 MonoInst *iargs [3];
9740 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9741 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9742 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9743 /* emit_memset only works when val == 0 */
9744 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9749 if (ip [1] == CEE_CPBLK) {
9750 MonoMethod *memcpy_method = get_memcpy_method ();
9751 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9753 MonoMethod *memset_method = get_memset_method ();
9754 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9764 ins_flag |= MONO_INST_NOTYPECHECK;
9766 ins_flag |= MONO_INST_NORANGECHECK;
9767 /* we ignore the no-nullcheck for now since we
9768 * really do it explicitly only when doing callvirt->call
9774 int handler_offset = -1;
9776 for (i = 0; i < header->num_clauses; ++i) {
9777 MonoExceptionClause *clause = &header->clauses [i];
9778 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9779 handler_offset = clause->handler_offset;
9784 bblock->flags |= BB_EXCEPTION_UNSAFE;
9786 g_assert (handler_offset != -1);
9788 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9789 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9790 ins->sreg1 = load->dreg;
9791 MONO_ADD_INS (bblock, ins);
9793 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9794 MONO_ADD_INS (bblock, ins);
9797 link_bblock (cfg, bblock, end_bblock);
9798 start_new_bblock = 1;
9806 CHECK_STACK_OVF (1);
9808 token = read32 (ip + 2);
9809 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9810 MonoType *type = mono_type_create_from_typespec (image, token);
9811 token = mono_type_size (type, &ialign);
9813 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9814 CHECK_TYPELOAD (klass);
9815 mono_class_init (klass);
9816 token = mono_class_value_size (klass, &align);
9818 EMIT_NEW_ICONST (cfg, ins, token);
9823 case CEE_REFANYTYPE: {
9824 MonoInst *src_var, *src;
9830 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9832 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9833 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9852 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9862 g_warning ("opcode 0x%02x not handled", *ip);
9866 if (start_new_bblock != 1)
9869 bblock->cil_length = ip - bblock->cil_code;
9870 bblock->next_bb = end_bblock;
9872 if (cfg->method == method && cfg->domainvar) {
9874 MonoInst *get_domain;
9876 cfg->cbb = init_localsbb;
9878 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9879 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9882 get_domain->dreg = alloc_preg (cfg);
9883 MONO_ADD_INS (cfg->cbb, get_domain);
9885 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9886 MONO_ADD_INS (cfg->cbb, store);
9889 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9890 if (cfg->compile_aot)
9891 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9892 mono_get_got_var (cfg);
9895 if (cfg->method == method && cfg->got_var)
9896 mono_emit_load_got_addr (cfg);
9901 cfg->cbb = init_localsbb;
9903 for (i = 0; i < header->num_locals; ++i) {
9904 MonoType *ptype = header->locals [i];
9905 int t = ptype->type;
9906 dreg = cfg->locals [i]->dreg;
9908 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9909 t = mono_class_enum_basetype (ptype->data.klass)->type;
9911 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9912 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9913 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9914 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9915 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9916 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9917 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9918 ins->type = STACK_R8;
9919 ins->inst_p0 = (void*)&r8_0;
9920 ins->dreg = alloc_dreg (cfg, STACK_R8);
9921 MONO_ADD_INS (init_localsbb, ins);
9922 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9923 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9924 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9925 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9927 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9932 if (cfg->init_ref_vars && cfg->method == method) {
9933 /* Emit initialization for ref vars */
9934 // FIXME: Avoid duplication initialization for IL locals.
9935 for (i = 0; i < cfg->num_varinfo; ++i) {
9936 MonoInst *ins = cfg->varinfo [i];
9938 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9939 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9943 /* Add a sequence point for method entry/exit events */
9945 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9946 MONO_ADD_INS (init_localsbb, ins);
9947 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9948 MONO_ADD_INS (cfg->bb_exit, ins);
9953 if (cfg->method == method) {
9955 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9956 bb->region = mono_find_block_region (cfg, bb->real_offset);
9958 mono_create_spvar_for_region (cfg, bb->region);
9959 if (cfg->verbose_level > 2)
9960 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9964 g_slist_free (class_inits);
9965 dont_inline = g_list_remove (dont_inline, method);
9967 if (inline_costs < 0) {
9970 /* Method is too large */
9971 mname = mono_method_full_name (method, TRUE);
9972 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9973 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9975 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9976 mono_basic_block_free (original_bb);
9980 if ((cfg->verbose_level > 2) && (cfg->method == method))
9981 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9983 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9984 mono_basic_block_free (original_bb);
9985 return inline_costs;
9988 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9995 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9999 set_exception_type_from_invalid_il (cfg, method, ip);
10003 g_slist_free (class_inits);
10004 mono_basic_block_free (original_bb);
10005 dont_inline = g_list_remove (dont_inline, method);
10006 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to the corresponding *_MEMBASE_IMM
 * form, used when the value being stored is known to be a constant.
 * Any opcode without an immediate variant trips the assert below.
 */
10011 store_membase_reg_to_store_membase_imm (int opcode)
10014 case OP_STORE_MEMBASE_REG:
10015 return OP_STORE_MEMBASE_IMM;
10016 case OP_STOREI1_MEMBASE_REG:
10017 return OP_STOREI1_MEMBASE_IMM;
10018 case OP_STOREI2_MEMBASE_REG:
10019 return OP_STOREI2_MEMBASE_IMM;
10020 case OP_STOREI4_MEMBASE_REG:
10021 return OP_STOREI4_MEMBASE_IMM;
10022 case OP_STOREI8_MEMBASE_REG:
10023 return OP_STOREI8_MEMBASE_IMM;
/* Caller must only pass one of the store opcodes above. */
10025 g_assert_not_reached ();
10031 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a reg-reg opcode to its reg-immediate variant (e.g. an integer add
 * becomes OP_IADD_IMM), covering 32-bit and long ALU ops, shifts, compares,
 * membase stores, and a few x86/amd64-specific opcodes.
 * NOTE(review): this is a sampled excerpt — the case labels and the default
 * return (presumably -1 for "no immediate form") are not visible here.
 */
10034 mono_op_to_op_imm (int opcode)
10038 return OP_IADD_IMM;
10040 return OP_ISUB_IMM;
10042 return OP_IDIV_IMM;
10044 return OP_IDIV_UN_IMM;
10046 return OP_IREM_IMM;
10048 return OP_IREM_UN_IMM;
10050 return OP_IMUL_IMM;
10052 return OP_IAND_IMM;
10056 return OP_IXOR_IMM;
10058 return OP_ISHL_IMM;
10060 return OP_ISHR_IMM;
10062 return OP_ISHR_UN_IMM;
/* 64-bit (long) variants */
10065 return OP_LADD_IMM;
10067 return OP_LSUB_IMM;
10069 return OP_LAND_IMM;
10073 return OP_LXOR_IMM;
10075 return OP_LSHL_IMM;
10077 return OP_LSHR_IMM;
10079 return OP_LSHR_UN_IMM;
/* compares */
10082 return OP_COMPARE_IMM;
10084 return OP_ICOMPARE_IMM;
10086 return OP_LCOMPARE_IMM;
/* membase stores: immediate forms exist for the pointer-, 1-, 2-, 4-byte widths */
10088 case OP_STORE_MEMBASE_REG:
10089 return OP_STORE_MEMBASE_IMM;
10090 case OP_STOREI1_MEMBASE_REG:
10091 return OP_STOREI1_MEMBASE_IMM;
10092 case OP_STOREI2_MEMBASE_REG:
10093 return OP_STOREI2_MEMBASE_IMM;
10094 case OP_STOREI4_MEMBASE_REG:
10095 return OP_STOREI4_MEMBASE_IMM;
/* target-specific opcodes */
10097 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10099 return OP_X86_PUSH_IMM;
10100 case OP_X86_COMPARE_MEMBASE_REG:
10101 return OP_X86_COMPARE_MEMBASE_IMM;
10103 #if defined(TARGET_AMD64)
10104 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10105 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10107 case OP_VOIDCALL_REG:
10108 return OP_VOIDCALL;
10116 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the IR OP_LOAD*_MEMBASE load of the same
 * size and signedness; object references (CEE_LDIND_REF) use the
 * pointer-sized OP_LOAD_MEMBASE. Asserts on any other opcode.
 * NOTE(review): sampled excerpt — most case labels are elided between the
 * numbered lines below.
 */
10123 ldind_to_load_membase (int opcode)
10127 return OP_LOADI1_MEMBASE;
10129 return OP_LOADU1_MEMBASE;
10131 return OP_LOADI2_MEMBASE;
10133 return OP_LOADU2_MEMBASE;
10135 return OP_LOADI4_MEMBASE;
10137 return OP_LOADU4_MEMBASE;
10139 return OP_LOAD_MEMBASE;
10140 case CEE_LDIND_REF:
10141 return OP_LOAD_MEMBASE;
10143 return OP_LOADI8_MEMBASE;
10145 return OP_LOADR4_MEMBASE;
10147 return OP_LOADR8_MEMBASE;
10149 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the IR OP_STORE*_MEMBASE_REG store of
 * the same size; object references (CEE_STIND_REF) use the pointer-sized
 * OP_STORE_MEMBASE_REG. Asserts on any other opcode.
 * NOTE(review): sampled excerpt — most case labels are elided between the
 * numbered lines below.
 */
10156 stind_to_store_membase (int opcode)
10160 return OP_STOREI1_MEMBASE_REG;
10162 return OP_STOREI2_MEMBASE_REG;
10164 return OP_STOREI4_MEMBASE_REG;
10166 case CEE_STIND_REF:
10167 return OP_STORE_MEMBASE_REG;
10169 return OP_STOREI8_MEMBASE_REG;
10171 return OP_STORER4_MEMBASE_REG;
10173 return OP_STORER8_MEMBASE_REG;
10175 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset (*_MEMBASE) load to the absolute-address (*_MEM) form,
 * which only exists on targets that can load from an immediate address
 * (x86/amd64; the 8-byte case additionally requires 64-bit registers).
 * NOTE(review): the non-x86 path and the fallthrough return (presumably -1)
 * are elided in this excerpt.
 */
10182 mono_load_membase_to_load_mem (int opcode)
10184 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10185 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10187 case OP_LOAD_MEMBASE:
10188 return OP_LOAD_MEM;
10189 case OP_LOADU1_MEMBASE:
10190 return OP_LOADU1_MEM;
10191 case OP_LOADU2_MEMBASE:
10192 return OP_LOADU2_MEM;
10193 case OP_LOADI4_MEMBASE:
10194 return OP_LOADI4_MEM;
10195 case OP_LOADU4_MEMBASE:
10196 return OP_LOADU4_MEM;
10197 #if SIZEOF_REGISTER == 8
10198 case OP_LOADI8_MEMBASE:
10199 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return a read-modify-write x86/amd64 opcode that applies ALU operation
 * 'opcode' directly to a memory destination, so a load+op+store sequence on
 * a stack variable can be fused into a single instruction. The
 * 'store_opcode' guard rejects operand widths the membase form cannot
 * encode (only pointer-/4-byte stores on x86, plus 8-byte on amd64).
 * NOTE(review): sampled excerpt — the incoming opcode case labels and the
 * default return (presumably -1 for "cannot fuse") are elided.
 */
10208 op_to_op_dest_membase (int store_opcode, int opcode)
10210 #if defined(TARGET_X86)
10211 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* reg-to-membase ALU forms */
10216 return OP_X86_ADD_MEMBASE_REG;
10218 return OP_X86_SUB_MEMBASE_REG;
10220 return OP_X86_AND_MEMBASE_REG;
10222 return OP_X86_OR_MEMBASE_REG;
10224 return OP_X86_XOR_MEMBASE_REG;
/* imm-to-membase ALU forms */
10227 return OP_X86_ADD_MEMBASE_IMM;
10230 return OP_X86_SUB_MEMBASE_IMM;
10233 return OP_X86_AND_MEMBASE_IMM;
10236 return OP_X86_OR_MEMBASE_IMM;
10239 return OP_X86_XOR_MEMBASE_IMM;
10245 #if defined(TARGET_AMD64)
10246 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit forms reuse the OP_X86_* opcodes on amd64 */
10251 return OP_X86_ADD_MEMBASE_REG;
10253 return OP_X86_SUB_MEMBASE_REG;
10255 return OP_X86_AND_MEMBASE_REG;
10257 return OP_X86_OR_MEMBASE_REG;
10259 return OP_X86_XOR_MEMBASE_REG;
10261 return OP_X86_ADD_MEMBASE_IMM;
10263 return OP_X86_SUB_MEMBASE_IMM;
10265 return OP_X86_AND_MEMBASE_IMM;
10267 return OP_X86_OR_MEMBASE_IMM;
10269 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit forms use the OP_AMD64_* opcodes */
10271 return OP_AMD64_ADD_MEMBASE_REG;
10273 return OP_AMD64_SUB_MEMBASE_REG;
10275 return OP_AMD64_AND_MEMBASE_REG;
10277 return OP_AMD64_OR_MEMBASE_REG;
10279 return OP_AMD64_XOR_MEMBASE_REG;
10282 return OP_AMD64_ADD_MEMBASE_IMM;
10285 return OP_AMD64_SUB_MEMBASE_IMM;
10288 return OP_AMD64_AND_MEMBASE_IMM;
10291 return OP_AMD64_OR_MEMBASE_IMM;
10294 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-result opcode with the byte store consuming it: when
 * the result is stored via OP_STOREI1_MEMBASE_REG, return the x86 SETcc
 * directly-to-memory form (SETEQ/SETNE membase below).
 * NOTE(review): sampled excerpt — the opcode case labels and the default
 * return (presumably -1) are elided.
 */
10304 op_to_op_store_membase (int store_opcode, int opcode)
10306 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10309 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10310 return OP_X86_SETEQ_MEMBASE;
10312 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10313 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding the FIRST source operand of 'opcode' into a
 * memory-operand form (push-from-membase, compare-with-membase), so the
 * separate load instruction can be dropped. 'load_opcode' carries the
 * width of the memory operand being folded in.
 * NOTE(review): sampled excerpt — several case labels and the default
 * return (presumably -1) are elided.
 */
10321 op_to_op_src1_membase (int load_opcode, int opcode)
10324 /* FIXME: This has sign extension issues */
10326 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10327 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer-/4-byte loads can be folded on x86 */
10330 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10335 return OP_X86_PUSH_MEMBASE;
10336 case OP_COMPARE_IMM:
10337 case OP_ICOMPARE_IMM:
10338 return OP_X86_COMPARE_MEMBASE_IMM;
10341 return OP_X86_COMPARE_MEMBASE_REG;
10345 #ifdef TARGET_AMD64
10346 /* FIXME: This has sign extension issues */
10348 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10349 return OP_X86_COMPARE_MEMBASE8_IMM;
10354 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10355 return OP_X86_PUSH_MEMBASE;
/* The compare-immediate fusion below is commented out in the original
 * because amd64 CMP only encodes 32-bit immediates. */
10357 /* FIXME: This only works for 32 bit immediates
10358 case OP_COMPARE_IMM:
10359 case OP_LCOMPARE_IMM:
10360 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10361 return OP_AMD64_COMPARE_MEMBASE_IMM;
10363 case OP_ICOMPARE_IMM:
10364 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10365 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10369 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10370 return OP_AMD64_COMPARE_MEMBASE_REG;
10373 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10374 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding the SECOND source operand of 'opcode' into a
 * reg-op-membase form (compare/add/sub/and/or/xor with a memory operand).
 * On amd64 the 4-byte loads map to the OP_X86_* forms and the 8-byte loads
 * to the OP_AMD64_* forms.
 * NOTE(review): sampled excerpt — the opcode case labels and the default
 * return (presumably -1) are elided.
 */
10383 op_to_op_src2_membase (int load_opcode, int opcode)
10386 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10392 return OP_X86_COMPARE_REG_MEMBASE;
10394 return OP_X86_ADD_REG_MEMBASE;
10396 return OP_X86_SUB_REG_MEMBASE;
10398 return OP_X86_AND_REG_MEMBASE;
10400 return OP_X86_OR_REG_MEMBASE;
10402 return OP_X86_XOR_REG_MEMBASE;
10406 #ifdef TARGET_AMD64
10409 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10410 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10414 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10415 return OP_AMD64_COMPARE_REG_MEMBASE;
10418 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10419 return OP_X86_ADD_REG_MEMBASE;
10421 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10422 return OP_X86_SUB_REG_MEMBASE;
10424 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10425 return OP_X86_AND_REG_MEMBASE;
10427 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10428 return OP_X86_OR_REG_MEMBASE;
10430 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10431 return OP_X86_XOR_REG_MEMBASE;
10433 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10434 return OP_AMD64_ADD_REG_MEMBASE;
10436 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10437 return OP_AMD64_SUB_REG_MEMBASE;
10439 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10440 return OP_AMD64_AND_REG_MEMBASE;
10442 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10443 return OP_AMD64_OR_REG_MEMBASE;
10445 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10446 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but first excludes opcodes that this target
 * emulates with helper calls (long shifts on 32-bit registers without
 * native long-shift support; mul/div under MONO_ARCH_EMULATE_MUL_DIV /
 * MONO_ARCH_EMULATE_DIV) before delegating to mono_op_to_op_imm.
 * NOTE(review): the excluded case labels are elided in this excerpt.
 */
10454 mono_op_to_op_imm_noemul (int opcode)
10457 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10463 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10471 return mono_op_to_op_imm (opcode);
10475 #ifndef DISABLE_JIT
10478 * mono_handle_global_vregs:
10480 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass 1 below walks every instruction of every bblock recording, per vreg,
 * the single bblock that uses it (block_num + 1) or -1 once a second bblock
 * is seen; vregs seen in two bblocks are promoted to MonoInst variables.
 * Pass 2 demotes variables confined to one bblock back to plain vregs, and
 * the final loop compresses cfg->varinfo / cfg->vars over the dead entries.
 */
10484 mono_handle_global_vregs (MonoCompile *cfg)
10486 gint32 *vreg_to_bb;
10487 MonoBasicBlock *bb;
/* NOTE(review): sizeof (gint32*) over-allocates per entry — the element
 * type is gint32; harmless (too large), but worth confirming/fixing. */
10490 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10492 #ifdef MONO_ARCH_SIMD_INTRINSICS
10493 if (cfg->uses_simd_intrinsics)
10494 mono_simd_simplify_indirection (cfg);
10497 /* Find local vregs used in more than one bb */
10498 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10499 MonoInst *ins = bb->code;
10500 int block_num = bb->block_num;
10502 if (cfg->verbose_level > 2)
10503 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10506 for (; ins; ins = ins->next) {
10507 const char *spec = INS_INFO (ins->opcode);
10508 int regtype = 0, regindex;
10511 if (G_UNLIKELY (cfg->verbose_level > 2))
10512 mono_print_ins (ins);
/* CIL-level opcodes must have been lowered to IR opcodes by now */
10514 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn */
10516 for (regindex = 0; regindex < 4; regindex ++) {
10519 if (regindex == 0) {
10520 regtype = spec [MONO_INST_DEST];
10521 if (regtype == ' ')
10524 } else if (regindex == 1) {
10525 regtype = spec [MONO_INST_SRC1];
10526 if (regtype == ' ')
10529 } else if (regindex == 2) {
10530 regtype = spec [MONO_INST_SRC2];
10531 if (regtype == ' ')
10534 } else if (regindex == 3) {
10535 regtype = spec [MONO_INST_SRC3];
10536 if (regtype == ' ')
10541 #if SIZEOF_REGISTER == 4
10542 /* In the LLVM case, the long opcodes are not decomposed */
10543 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10545 * Since some instructions reference the original long vreg,
10546 * and some reference the two component vregs, it is quite hard
10547 * to determine when it needs to be global. So be conservative.
10549 if (!get_vreg_to_inst (cfg, vreg)) {
10550 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10552 if (cfg->verbose_level > 2)
10553 printf ("LONG VREG R%d made global.\n", vreg);
10557 * Make the component vregs volatile since the optimizations can
10558 * get confused otherwise.
10560 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10561 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10565 g_assert (vreg != -1);
10567 prev_bb = vreg_to_bb [vreg];
10568 if (prev_bb == 0) {
10569 /* 0 is a valid block num */
10570 vreg_to_bb [vreg] = block_num + 1;
10571 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are exempt from promotion */
10572 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10575 if (!get_vreg_to_inst (cfg, vreg)) {
10576 if (G_UNLIKELY (cfg->verbose_level > 2))
10577 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Variable type chosen by regtype: 'i' -> int, 'l' -> int64, 'f' -> double */
10581 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10584 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10587 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10590 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10593 g_assert_not_reached ();
10597 /* Flag as having been used in more than one bb */
10598 vreg_to_bb [vreg] = -1;
10604 /* If a variable is used in only one bblock, convert it into a local vreg */
10605 for (i = 0; i < cfg->num_varinfo; i++) {
10606 MonoInst *var = cfg->varinfo [i];
10607 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10609 switch (var->type) {
10615 #if SIZEOF_REGISTER == 8
10618 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10619 /* Enabling this screws up the fp stack on x86 */
10622 /* Arguments are implicitly global */
10623 /* Putting R4 vars into registers doesn't work currently */
10624 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10626 * Make that the variable's liveness interval doesn't contain a call, since
10627 * that would cause the lvreg to be spilled, making the whole optimization
10630 /* This is too slow for JIT compilation */
10632 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10634 int def_index, call_index, ins_index;
10635 gboolean spilled = FALSE;
10640 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10641 const char *spec = INS_INFO (ins->opcode);
10643 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10644 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * presumably should test SRC2/sreg2; verify against upstream. */
10646 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10647 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10648 if (call_index > def_index) {
10654 if (MONO_IS_CALL (ins))
10655 call_index = ins_index;
10665 if (G_UNLIKELY (cfg->verbose_level > 2))
10666 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10667 var->flags |= MONO_INST_IS_DEAD;
10668 cfg->vreg_to_inst [var->dreg] = NULL;
10675 * Compress the varinfo and vars tables so the liveness computation is faster and
10676 * takes up less space.
10679 for (i = 0; i < cfg->num_varinfo; ++i) {
10680 MonoInst *var = cfg->varinfo [i];
10681 if (pos < i && cfg->locals_start == i)
10682 cfg->locals_start = pos;
10683 if (!(var->flags & MONO_INST_IS_DEAD)) {
10685 cfg->varinfo [pos] = cfg->varinfo [i];
10686 cfg->varinfo [pos]->inst_c0 = pos;
10687 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10688 cfg->vars [pos].idx = pos;
10689 #if SIZEOF_REGISTER == 4
10690 if (cfg->varinfo [pos]->type == STACK_I8) {
10691 /* Modify the two component vars too */
10694 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10695 var1->inst_c0 = pos;
10696 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10697 var1->inst_c0 = pos;
10704 cfg->num_varinfo = pos;
10705 if (cfg->locals_start > cfg->num_varinfo)
10706 cfg->locals_start = cfg->num_varinfo;
10710 * mono_spill_global_vars:
10712 * Generate spill code for variables which are not allocated to registers,
10713 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10714 * code is generated which could be optimized by the local optimization passes.
10717 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10719 MonoBasicBlock *bb;
10721 int orig_next_vreg;
10722 guint32 *vreg_to_lvreg;
10724 guint32 i, lvregs_len;
10725 gboolean dest_has_lvreg = FALSE;
10726 guint32 stacktypes [128];
10727 MonoInst **live_range_start, **live_range_end;
10728 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10730 *need_local_opts = FALSE;
10732 memset (spec2, 0, sizeof (spec2));
10734 /* FIXME: Move this function to mini.c */
10735 stacktypes ['i'] = STACK_PTR;
10736 stacktypes ['l'] = STACK_I8;
10737 stacktypes ['f'] = STACK_R8;
10738 #ifdef MONO_ARCH_SIMD_INTRINSICS
10739 stacktypes ['x'] = STACK_VTYPE;
10742 #if SIZEOF_REGISTER == 4
10743 /* Create MonoInsts for longs */
10744 for (i = 0; i < cfg->num_varinfo; i++) {
10745 MonoInst *ins = cfg->varinfo [i];
10747 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10748 switch (ins->type) {
10753 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10756 g_assert (ins->opcode == OP_REGOFFSET);
10758 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10760 tree->opcode = OP_REGOFFSET;
10761 tree->inst_basereg = ins->inst_basereg;
10762 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10764 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10766 tree->opcode = OP_REGOFFSET;
10767 tree->inst_basereg = ins->inst_basereg;
10768 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10778 /* FIXME: widening and truncation */
10781 * As an optimization, when a variable allocated to the stack is first loaded into
10782 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10783 * the variable again.
10785 orig_next_vreg = cfg->next_vreg;
10786 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10787 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10791 * These arrays contain the first and last instructions accessing a given
10793 * Since we emit bblocks in the same order we process them here, and we
10794 * don't split live ranges, these will precisely describe the live range of
10795 * the variable, i.e. the instruction range where a valid value can be found
10796 * in the variables location.
10797 * The live range is computed using the liveness info computed by the liveness pass.
10798 * We can't use vmv->range, since that is an abstract live range, and we need
10799 * one which is instruction precise.
10800 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10802 /* FIXME: Only do this if debugging info is requested */
10803 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10804 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10805 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10806 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10808 /* Add spill loads/stores */
10809 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10812 if (cfg->verbose_level > 2)
10813 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10815 /* Clear vreg_to_lvreg array */
10816 for (i = 0; i < lvregs_len; i++)
10817 vreg_to_lvreg [lvregs [i]] = 0;
10821 MONO_BB_FOR_EACH_INS (bb, ins) {
10822 const char *spec = INS_INFO (ins->opcode);
10823 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10824 gboolean store, no_lvreg;
10825 int sregs [MONO_MAX_SRC_REGS];
10827 if (G_UNLIKELY (cfg->verbose_level > 2))
10828 mono_print_ins (ins);
10830 if (ins->opcode == OP_NOP)
10834 * We handle LDADDR here as well, since it can only be decomposed
10835 * when variable addresses are known.
10837 if (ins->opcode == OP_LDADDR) {
10838 MonoInst *var = ins->inst_p0;
10840 if (var->opcode == OP_VTARG_ADDR) {
10841 /* Happens on SPARC/S390 where vtypes are passed by reference */
10842 MonoInst *vtaddr = var->inst_left;
10843 if (vtaddr->opcode == OP_REGVAR) {
10844 ins->opcode = OP_MOVE;
10845 ins->sreg1 = vtaddr->dreg;
10847 else if (var->inst_left->opcode == OP_REGOFFSET) {
10848 ins->opcode = OP_LOAD_MEMBASE;
10849 ins->inst_basereg = vtaddr->inst_basereg;
10850 ins->inst_offset = vtaddr->inst_offset;
10854 g_assert (var->opcode == OP_REGOFFSET);
10856 ins->opcode = OP_ADD_IMM;
10857 ins->sreg1 = var->inst_basereg;
10858 ins->inst_imm = var->inst_offset;
10861 *need_local_opts = TRUE;
10862 spec = INS_INFO (ins->opcode);
10865 if (ins->opcode < MONO_CEE_LAST) {
10866 mono_print_ins (ins);
10867 g_assert_not_reached ();
10871 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10875 if (MONO_IS_STORE_MEMBASE (ins)) {
10876 tmp_reg = ins->dreg;
10877 ins->dreg = ins->sreg2;
10878 ins->sreg2 = tmp_reg;
10881 spec2 [MONO_INST_DEST] = ' ';
10882 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10883 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10884 spec2 [MONO_INST_SRC3] = ' ';
10886 } else if (MONO_IS_STORE_MEMINDEX (ins))
10887 g_assert_not_reached ();
10892 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10893 printf ("\t %.3s %d", spec, ins->dreg);
10894 num_sregs = mono_inst_get_src_registers (ins, sregs);
10895 for (srcindex = 0; srcindex < 3; ++srcindex)
10896 printf (" %d", sregs [srcindex]);
10903 regtype = spec [MONO_INST_DEST];
10904 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10907 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10908 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10909 MonoInst *store_ins;
10911 MonoInst *def_ins = ins;
10912 int dreg = ins->dreg; /* The original vreg */
10914 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10916 if (var->opcode == OP_REGVAR) {
10917 ins->dreg = var->dreg;
10918 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10920 * Instead of emitting a load+store, use a _membase opcode.
10922 g_assert (var->opcode == OP_REGOFFSET);
10923 if (ins->opcode == OP_MOVE) {
10927 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10928 ins->inst_basereg = var->inst_basereg;
10929 ins->inst_offset = var->inst_offset;
10932 spec = INS_INFO (ins->opcode);
10936 g_assert (var->opcode == OP_REGOFFSET);
10938 prev_dreg = ins->dreg;
10940 /* Invalidate any previous lvreg for this vreg */
10941 vreg_to_lvreg [ins->dreg] = 0;
10945 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10947 store_opcode = OP_STOREI8_MEMBASE_REG;
10950 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10952 if (regtype == 'l') {
10953 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10954 mono_bblock_insert_after_ins (bb, ins, store_ins);
10955 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10956 mono_bblock_insert_after_ins (bb, ins, store_ins);
10957 def_ins = store_ins;
10960 g_assert (store_opcode != OP_STOREV_MEMBASE);
10962 /* Try to fuse the store into the instruction itself */
10963 /* FIXME: Add more instructions */
10964 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10965 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10966 ins->inst_imm = ins->inst_c0;
10967 ins->inst_destbasereg = var->inst_basereg;
10968 ins->inst_offset = var->inst_offset;
10969 spec = INS_INFO (ins->opcode);
10970 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10971 ins->opcode = store_opcode;
10972 ins->inst_destbasereg = var->inst_basereg;
10973 ins->inst_offset = var->inst_offset;
10977 tmp_reg = ins->dreg;
10978 ins->dreg = ins->sreg2;
10979 ins->sreg2 = tmp_reg;
10982 spec2 [MONO_INST_DEST] = ' ';
10983 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10984 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10985 spec2 [MONO_INST_SRC3] = ' ';
10987 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10988 // FIXME: The backends expect the base reg to be in inst_basereg
10989 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10991 ins->inst_basereg = var->inst_basereg;
10992 ins->inst_offset = var->inst_offset;
10993 spec = INS_INFO (ins->opcode);
10995 /* printf ("INS: "); mono_print_ins (ins); */
10996 /* Create a store instruction */
10997 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10999 /* Insert it after the instruction */
11000 mono_bblock_insert_after_ins (bb, ins, store_ins);
11002 def_ins = store_ins;
11005 * We can't assign ins->dreg to var->dreg here, since the
11006 * sregs could use it. So set a flag, and do it after
11009 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11010 dest_has_lvreg = TRUE;
11015 if (def_ins && !live_range_start [dreg]) {
11016 live_range_start [dreg] = def_ins;
11017 live_range_start_bb [dreg] = bb;
11024 num_sregs = mono_inst_get_src_registers (ins, sregs);
11025 for (srcindex = 0; srcindex < 3; ++srcindex) {
11026 regtype = spec [MONO_INST_SRC1 + srcindex];
11027 sreg = sregs [srcindex];
11029 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11030 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11031 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11032 MonoInst *use_ins = ins;
11033 MonoInst *load_ins;
11034 guint32 load_opcode;
11036 if (var->opcode == OP_REGVAR) {
11037 sregs [srcindex] = var->dreg;
11038 //mono_inst_set_src_registers (ins, sregs);
11039 live_range_end [sreg] = use_ins;
11040 live_range_end_bb [sreg] = bb;
11044 g_assert (var->opcode == OP_REGOFFSET);
11046 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11048 g_assert (load_opcode != OP_LOADV_MEMBASE);
11050 if (vreg_to_lvreg [sreg]) {
11051 g_assert (vreg_to_lvreg [sreg] != -1);
11053 /* The variable is already loaded to an lvreg */
11054 if (G_UNLIKELY (cfg->verbose_level > 2))
11055 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11056 sregs [srcindex] = vreg_to_lvreg [sreg];
11057 //mono_inst_set_src_registers (ins, sregs);
11061 /* Try to fuse the load into the instruction */
11062 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11063 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11064 sregs [0] = var->inst_basereg;
11065 //mono_inst_set_src_registers (ins, sregs);
11066 ins->inst_offset = var->inst_offset;
11067 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11068 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11069 sregs [1] = var->inst_basereg;
11070 //mono_inst_set_src_registers (ins, sregs);
11071 ins->inst_offset = var->inst_offset;
11073 if (MONO_IS_REAL_MOVE (ins)) {
11074 ins->opcode = OP_NOP;
11077 //printf ("%d ", srcindex); mono_print_ins (ins);
11079 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11081 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11082 if (var->dreg == prev_dreg) {
11084 * sreg refers to the value loaded by the load
11085 * emitted below, but we need to use ins->dreg
11086 * since it refers to the store emitted earlier.
11090 g_assert (sreg != -1);
11091 vreg_to_lvreg [var->dreg] = sreg;
11092 g_assert (lvregs_len < 1024);
11093 lvregs [lvregs_len ++] = var->dreg;
11097 sregs [srcindex] = sreg;
11098 //mono_inst_set_src_registers (ins, sregs);
11100 if (regtype == 'l') {
11101 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11102 mono_bblock_insert_before_ins (bb, ins, load_ins);
11103 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11104 mono_bblock_insert_before_ins (bb, ins, load_ins);
11105 use_ins = load_ins;
11108 #if SIZEOF_REGISTER == 4
11109 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11111 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11112 mono_bblock_insert_before_ins (bb, ins, load_ins);
11113 use_ins = load_ins;
11117 if (var->dreg < orig_next_vreg) {
11118 live_range_end [var->dreg] = use_ins;
11119 live_range_end_bb [var->dreg] = bb;
11123 mono_inst_set_src_registers (ins, sregs);
11125 if (dest_has_lvreg) {
11126 g_assert (ins->dreg != -1);
11127 vreg_to_lvreg [prev_dreg] = ins->dreg;
11128 g_assert (lvregs_len < 1024);
11129 lvregs [lvregs_len ++] = prev_dreg;
11130 dest_has_lvreg = FALSE;
11134 tmp_reg = ins->dreg;
11135 ins->dreg = ins->sreg2;
11136 ins->sreg2 = tmp_reg;
11139 if (MONO_IS_CALL (ins)) {
11140 /* Clear vreg_to_lvreg array */
11141 for (i = 0; i < lvregs_len; i++)
11142 vreg_to_lvreg [lvregs [i]] = 0;
11144 } else if (ins->opcode == OP_NOP) {
11146 MONO_INST_NULLIFY_SREGS (ins);
11149 if (cfg->verbose_level > 2)
11150 mono_print_ins_index (1, ins);
11153 /* Extend the live range based on the liveness info */
11154 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11155 for (i = 0; i < cfg->num_varinfo; i ++) {
11156 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11158 if (vreg_is_volatile (cfg, vi->vreg))
11159 /* The liveness info is incomplete */
11162 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11163 /* Live from at least the first ins of this bb */
11164 live_range_start [vi->vreg] = bb->code;
11165 live_range_start_bb [vi->vreg] = bb;
11168 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11169 /* Live at least until the last ins of this bb */
11170 live_range_end [vi->vreg] = bb->last_ins;
11171 live_range_end_bb [vi->vreg] = bb;
11177 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11179 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11180 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11182 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11183 for (i = 0; i < cfg->num_varinfo; ++i) {
11184 int vreg = MONO_VARINFO (cfg, i)->vreg;
11187 if (live_range_start [vreg]) {
11188 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11190 ins->inst_c1 = vreg;
11191 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11193 if (live_range_end [vreg]) {
11194 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11196 ins->inst_c1 = vreg;
11197 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11198 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11200 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11206 g_free (live_range_start);
11207 g_free (live_range_end);
11208 g_free (live_range_start_bb);
11209 g_free (live_range_end_bb);
11214 * - use 'iadd' instead of 'int_add'
11215 * - handling ovf opcodes: decompose in method_to_ir.
11216 * - unify iregs/fregs
11217 * -> partly done, the missing parts are:
11218 * - a more complete unification would involve unifying the hregs as well, so
11219 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11220 * would no longer map to the machine hregs, so the code generators would need to
11221 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11222 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11223 * fp/non-fp branches speeds it up by about 15%.
11224 * - use sext/zext opcodes instead of shifts
11226 * - get rid of TEMPLOADs if possible and use vregs instead
11227 * - clean up usage of OP_P/OP_ opcodes
11228 * - cleanup usage of DUMMY_USE
11229 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11231 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11232 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11233 * - make sure handle_stack_args () is called before the branch is emitted
11234 * - when the new IR is done, get rid of all unused stuff
11235 * - COMPARE/BEQ as separate instructions or unify them ?
11236 * - keeping them separate allows specialized compare instructions like
11237 * compare_imm, compare_membase
11238 * - most back ends unify fp compare+branch, fp compare+ceq
11239 * - integrate mono_save_args into inline_method
11240 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11241 * - handle long shift opts on 32 bit platforms somehow: they require
11242 * 3 sregs (2 for arg1 and 1 for arg2)
11243 * - make byref a 'normal' type.
11244 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11245 * variable if needed.
11246 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11247 * like inline_method.
11248 * - remove inlining restrictions
11249 * - fix LNEG and enable cfold of INEG
11250 * - generalize x86 optimizations like ldelema as a peephole optimization
11251 * - add store_mem_imm for amd64
11252 * - optimize the loading of the interruption flag in the managed->native wrappers
11253 * - avoid special handling of OP_NOP in passes
11254 * - move code inserting instructions into one function/macro.
11255 * - try a coalescing phase after liveness analysis
11256 * - add float -> vreg conversion + local optimizations on !x86
11257 * - figure out how to handle decomposed branches during optimizations, ie.
11258 * compare+branch, op_jump_table+op_br etc.
11259 * - promote RuntimeXHandles to vregs
11260 * - vtype cleanups:
11261 * - add a NEW_VARLOADA_VREG macro
11262 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11263 * accessing vtype fields.
11264 * - get rid of I8CONST on 64 bit platforms
11265 * - dealing with the increase in code size due to branches created during opcode
11267 * - use extended basic blocks
11268 * - all parts of the JIT
11269 * - handle_global_vregs () && local regalloc
11270 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11271 * - sources of increase in code size:
11274 * - isinst and castclass
11275 * - lvregs not allocated to global registers even if used multiple times
11276 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11278 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11279 * - add all micro optimizations from the old JIT
11280 * - put tree optimizations into the deadce pass
11281 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11282 * specific function.
11283 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11284 * fcompare + branchCC.
11285 * - create a helper function for allocating a stack slot, taking into account
11286 * MONO_CFG_HAS_SPILLUP.
11288 * - merge the ia64 switch changes.
11289 * - optimize mono_regstate2_alloc_int/float.
11290 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11291 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11292 * parts of the tree could be separated by other instructions, killing the tree
11293 * arguments, or stores killing loads etc. Also, should we fold loads into other
11294 * instructions if the result of the load is used multiple times ?
11295 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11296 * - LAST MERGE: 108395.
11297 * - when returning vtypes in registers, generate IR and append it to the end of the
11298 * last bb instead of doing it in the epilog.
11299 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11307 - When to decompose opcodes:
11308 - earlier: this makes some optimizations hard to implement, since the low level IR
11309 no longer contains the necessary information. But it is easier to do.
11310 - later: harder to implement, enables more optimizations.
11311 - Branches inside bblocks:
11312 - created when decomposing complex opcodes.
11313 - branches to another bblock: harmless, but not tracked by the branch
11314 optimizations, so need to branch to a label at the start of the bblock.
11315 - branches to inside the same bblock: very problematic, trips up the local
11316 reg allocator. Can be fixed by splitting the current bblock, but that is a
11317 complex operation, since some local vregs can become global vregs etc.
11318 - Local/global vregs:
11319 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11320 local register allocator.
11321 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11322 structure, created by mono_create_var (). Assigned to hregs or the stack by
11323 the global register allocator.
11324 - When to do optimizations like alu->alu_imm:
11325 - earlier -> saves work later on since the IR will be smaller/simpler
11326 - later -> can work on more instructions
11327 - Handling of valuetypes:
11328 - When a vtype is pushed on the stack, a new temporary is created, an
11329 instruction computing its address (LDADDR) is emitted and pushed on
11330 the stack. Need to optimize cases when the vtype is used immediately as in
11331 argument passing, stloc etc.
11332 - Instead of the to_end stuff in the old JIT, simply call the function handling
11333 the values on the stack before emitting the last instruction of the bb.
11336 #endif /* DISABLE_JIT */