2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
77 /* These have 'cfg' as an implicit argument */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; \
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whether 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
149 /* helper methods signatures */
150 static MonoMethodSignature *helper_sig_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
153 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
154 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
156 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
157 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
160 * Instruction metadata
168 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
169 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
175 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
180 /* keep in sync with the enum in mini.h */
183 #include "mini-ops.h"
188 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
189 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
191 * This should contain the index of the last sreg + 1. This is not the same
192 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
194 const gint8 ins_sreg_counts[] = {
195 #include "mini-ops.h"
200 #define MONO_INIT_VARINFO(vi,id) do { \
201 (vi)->range.first_use.pos.bid = 0xffff; \
207 mono_alloc_ireg (MonoCompile *cfg)
209 return alloc_ireg (cfg);
213 mono_alloc_lreg (MonoCompile *cfg)
215 return alloc_lreg (cfg);
219 mono_alloc_freg (MonoCompile *cfg)
221 return alloc_freg (cfg);
225 mono_alloc_preg (MonoCompile *cfg)
227 return alloc_preg (cfg);
231 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
233 return alloc_dreg (cfg, stack_type);
237 * mono_alloc_ireg_ref:
239 * Allocate an IREG, and mark it as holding a GC ref.
242 mono_alloc_ireg_ref (MonoCompile *cfg)
244 return alloc_ireg_ref (cfg);
248 * mono_alloc_ireg_mp:
250 * Allocate an IREG, and mark it as holding a managed pointer.
253 mono_alloc_ireg_mp (MonoCompile *cfg)
255 return alloc_ireg_mp (cfg);
259 * mono_alloc_ireg_copy:
261 * Allocate an IREG with the same GC type as VREG.
264 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
266 if (vreg_is_ref (cfg, vreg))
267 return alloc_ireg_ref (cfg);
268 else if (vreg_is_mp (cfg, vreg))
269 return alloc_ireg_mp (cfg);
271 return alloc_ireg (cfg);
275 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
280 type = mini_get_underlying_type (cfg, type);
282 switch (type->type) {
295 case MONO_TYPE_FNPTR:
297 case MONO_TYPE_CLASS:
298 case MONO_TYPE_STRING:
299 case MONO_TYPE_OBJECT:
300 case MONO_TYPE_SZARRAY:
301 case MONO_TYPE_ARRAY:
305 #if SIZEOF_REGISTER == 8
311 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
314 case MONO_TYPE_VALUETYPE:
315 if (type->data.klass->enumtype) {
316 type = mono_class_enum_basetype (type->data.klass);
319 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
322 case MONO_TYPE_TYPEDBYREF:
324 case MONO_TYPE_GENERICINST:
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->generic_sharing_context);
330 if (mini_type_var_is_vt (cfg, type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
346 printf ("\n%s %d: [IN: ", msg, bb->block_num);
347 for (i = 0; i < bb->in_count; ++i)
348 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 for (i = 0; i < bb->out_count; ++i)
351 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 for (tree = bb->code; tree; tree = tree->next)
354 mono_print_ins_index (-1, tree);
358 mono_create_helper_signatures (void)
360 helper_sig_domain_get = mono_create_icall_signature ("ptr");
361 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
362 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
364 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
365 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
366 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
367 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
377 static MONO_NEVER_INLINE void
378 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
383 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
384 g_free (method_fname);
385 g_free (cil_method_fname);
388 static MONO_NEVER_INLINE void
389 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
391 char *method_fname = mono_method_full_name (method, TRUE);
392 char *field_fname = mono_field_full_name (field);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
394 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
395 g_free (method_fname);
396 g_free (field_fname);
399 static MONO_NEVER_INLINE void
400 inline_failure (MonoCompile *cfg, const char *msg)
402 if (cfg->verbose_level >= 2)
403 printf ("inline failed: %s\n", msg);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 *
 *   Record that generic sharing of the current method failed for OPCODE at
 * FILE:LINE (called from the GENERIC_SHARING_FAILURE macro). Note that FILE
 * is not used by the trace printf below — only LINE is reported.
 */
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 if (cfg->verbose_level > 2)
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
415 static MONO_NEVER_INLINE void
416 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
418 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
419 if (cfg->verbose_level >= 2)
420 printf ("%s\n", cfg->exception_message);
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
461 /* Emit conversions so both operands of a binary opcode are of the same type */
463 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
465 MonoInst *arg1 = *arg1_ref;
466 MonoInst *arg2 = *arg2_ref;
469 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
470 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
473 /* Mixing r4/r8 is allowed by the spec */
474 if (arg1->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
478 conv->type = STACK_R8;
482 if (arg2->type == STACK_R4) {
483 int dreg = alloc_freg (cfg);
485 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
486 conv->type = STACK_R8;
492 #if SIZEOF_REGISTER == 8
493 /* FIXME: Need to add many more cases */
494 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
497 int dr = alloc_preg (cfg);
498 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
499 (ins)->sreg2 = widen->dreg;
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
524 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
529 #define ADD_BINCOND(next_block) do { \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, bblock, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, bblock, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, bblock, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (bblock, cmp); \
558 MONO_ADD_INS (bblock, ins); \
562 * link_bblock: Links two basic blocks
564 * links two basic blocks in the control flow graph, the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * the control flow goes to after 'from'.
569 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
571 MonoBasicBlock **newa;
575 if (from->cil_code) {
577 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
579 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
582 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
584 printf ("edge from entry to exit\n");
589 for (i = 0; i < from->out_count; ++i) {
590 if (to == from->out_bb [i]) {
596 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
597 for (i = 0; i < from->out_count; ++i) {
598 newa [i] = from->out_bb [i];
606 for (i = 0; i < to->in_count; ++i) {
607 if (from == to->in_bb [i]) {
613 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
614 for (i = 0; i < to->in_count; ++i) {
615 newa [i] = to->in_bb [i];
624 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
626 link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
644 mono_find_block_region (MonoCompile *cfg, int offset)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
650 for (i = 0; i < header->num_clauses; ++i) {
651 clause = &header->clauses [i];
652 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
653 (offset < (clause->handler_offset)))
654 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
656 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
657 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
658 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
659 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
660 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
662 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
665 for (i = 0; i < header->num_clauses; ++i) {
666 clause = &header->clauses [i];
668 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
669 return ((i + 1) << 8) | clause->flags;
676 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
678 MonoMethodHeader *header = cfg->header;
679 MonoExceptionClause *clause;
683 for (i = 0; i < header->num_clauses; ++i) {
684 clause = &header->clauses [i];
685 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
686 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
687 if (clause->flags == type)
688 res = g_list_append (res, clause);
695 mono_create_spvar_for_region (MonoCompile *cfg, int region)
699 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
703 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
704 /* prevent it from being register allocated */
705 var->flags |= MONO_INST_VOLATILE;
707 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
711 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
713 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
717 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
721 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
725 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
726 /* prevent it from being register allocated */
727 var->flags |= MONO_INST_VOLATILE;
729 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
735 * Returns the type used in the eval stack when @type is loaded.
736 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
739 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
743 type = mini_get_underlying_type (cfg, type);
744 inst->klass = klass = mono_class_from_mono_type (type);
746 inst->type = STACK_MP;
751 switch (type->type) {
753 inst->type = STACK_INV;
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
804 g_assert (cfg->generic_sharing_context);
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an IR evaluation-stack type (ins->type) back to a MonoType*.
 *   For STACK_MP/STACK_VTYPE the result depends on ins->klass.
 */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the by-ref form of the pointed-to class. */
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
/* Unhandled stack type is a compiler bug. */
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType* to the IR evaluation-stack type (STACK_*) used to
 *   represent it.  The inverse of type_from_stack_type ().
 *   NOTE(review): many case labels are elided in this dump.
 */
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. before classifying. */
1289 t = mono_type_get_underlying_type (t);
1301 case MONO_TYPE_FNPTR:
1303 case MONO_TYPE_CLASS:
1304 case MONO_TYPE_STRING:
1305 case MONO_TYPE_OBJECT:
1306 case MONO_TYPE_SZARRAY:
1307 case MONO_TYPE_ARRAY:
/* R4 may be widened to R8 depending on the backend configuration. */
1313 return cfg->r4_stack_type;
1316 case MONO_TYPE_VALUETYPE:
1317 case MONO_TYPE_TYPEDBYREF:
1319 case MONO_TYPE_GENERICINST:
1320 if (mono_type_generic_inst_is_valuetype (t))
1326 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element MonoClass implied by a CIL ldelem/stelem opcode
 *   variant (e.g. LDELEM_I1 -> sbyte, STELEM_REF -> object).
 *   NOTE(review): the case labels for most opcodes are elided in this dump;
 *   only the returned classes are visible.
 */
1333 array_access_to_klass (int opcode)
1337 return mono_defaults.byte_class;
1339 return mono_defaults.uint16_class;
1342 return mono_defaults.int_class;
1345 return mono_defaults.sbyte_class;
1348 return mono_defaults.int16_class;
1351 return mono_defaults.int32_class;
1353 return mono_defaults.uint32_class;
1356 return mono_defaults.int64_class;
1359 return mono_defaults.single_class;
1362 return mono_defaults.double_class;
1363 case CEE_LDELEM_REF:
1364 case CEE_STELEM_REF:
1365 return mono_defaults.object_class;
1367 g_assert_not_reached ();
1373  * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable used to carry a stack value across basic-block
 *   boundaries, reusing a previously allocated variable for the same
 *   (stack slot, stack type) pair when possible (cached in cfg->intvars).
 */
1376 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1381 /* inlining can result in deeper stacks */
1382 if (slot >= cfg->header->max_stack)
1383 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the (slot x stack-type) cache. */
1385 pos = ins->type - 1 + slot * STACK_MAX;
1387 switch (ins->type) {
1394 if ((vnum = cfg->intvars [pos]))
1395 return cfg->varinfo [vnum];
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1397 cfg->intvars [pos] = res->inst_c0;
/* Fallback path (uncached stack types): always allocate a fresh var. */
1400 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Record the (image, token) pair that produced KEY, so the AOT compiler
 *   can later re-resolve the item from metadata alone.
 */
1406 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1409 * Don't use this if a generic_context is set, since that means AOT can't
1410 * look up the method using just the image+token.
1411 * table == 0 means this is a reference made from a wrapper.
1413 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1414 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1415 jump_info_token->image = image;
1416 jump_info_token->token = token;
1417 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1422  * This function is called to handle items that are left on the evaluation stack
1423  * at basic block boundaries. What happens is that we save the values to local variables
1424  * and we reload them later when first entering the target basic block (with the
1425  * handle_loaded_temps () function).
1426  * A single joint point will use the same variables (stored in the array bb->out_stack or
1427  * bb->in_stack, if the basic block is before or after the joint point).
1429  * This function needs to be called _before_ emitting the last instruction of
1430  * the bb (i.e. before emitting a branch).
1431  * If the stack merge fails at a join point, cfg->unverifiable is set.
1434 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1437 MonoBasicBlock *bb = cfg->cbb;
1438 MonoBasicBlock *outb;
1439 MonoInst *inst, **locals;
1444 if (cfg->verbose_level > 3)
1445 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: decide which local variables will carry the stack values. */
1446 if (!bb->out_scount) {
1447 bb->out_scount = count;
1448 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1450 for (i = 0; i < bb->out_count; ++i) {
1451 outb = bb->out_bb [i];
1452 /* exception handlers are linked, but they should not be considered for stack args */
1453 if (outb->flags & BB_EXCEPTION_HANDLER)
1455 //printf (" %d", outb->block_num);
1456 if (outb->in_stack) {
1458 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables. */
1464 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1465 for (i = 0; i < count; ++i) {
1467 * try to reuse temps already allocated for this purpouse, if they occupy the same
1468 * stack slot and if they are of the same type.
1469 * This won't cause conflicts since if 'local' is used to
1470 * store one of the values in the in_stack of a bblock, then
1471 * the same variable will be used for the same outgoing stack
1473 * This doesn't work when inlining methods, since the bblocks
1474 * in the inlined methods do not inherit their in_stack from
1475 * the bblock they are inlined to. See bug #58863 for an
1478 if (cfg->inlined_method)
1479 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1481 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate our out_stack as the in_stack of every successor. */
1486 for (i = 0; i < bb->out_count; ++i) {
1487 outb = bb->out_bb [i];
1488 /* exception handlers are linked, but they should not be considered for stack args */
1489 if (outb->flags & BB_EXCEPTION_HANDLER)
/* A join point with a different stack depth is unverifiable IL. */
1491 if (outb->in_scount) {
1492 if (outb->in_scount != bb->out_scount) {
1493 cfg->unverifiable = TRUE;
1496 continue; /* check they are the same locals */
1498 outb->in_scount = count;
1499 outb->in_stack = bb->out_stack;
/* Phase 3: emit the stores of the stack values into the chosen locals. */
1502 locals = bb->out_stack;
1504 for (i = 0; i < count; ++i) {
1505 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1506 inst->cil_code = sp [i]->cil_code;
1507 sp [i] = locals [i];
1508 if (cfg->verbose_level > 3)
1509 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1513 * It is possible that the out bblocks already have in_stack assigned, and
1514 * the in_stacks differ. In this case, we will store to all the different
1521 /* Find a bblock which has a different in_stack */
1523 while (bindex < bb->out_count) {
1524 outb = bb->out_bb [bindex];
1525 /* exception handlers are linked, but they should not be considered for stack args */
1526 if (outb->flags & BB_EXCEPTION_HANDLER) {
1530 if (outb->in_stack != locals) {
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1538 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 *   bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id
 *   set.  Two strategies exist: a JIT icall when the bitmap is compressed,
 *   or direct bit arithmetic on the raw bitmap otherwise.
 */
1548 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1550 int ibitmap_reg = alloc_preg (cfg);
1551 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the lookup to mono_class_interface_match (). */
1553 MonoInst *res, *ins;
1554 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1555 MONO_ADD_INS (cfg->cbb, ins);
/* Under AOT the interface id is only known at load time (IID patch). */
1557 if (cfg->compile_aot)
1558 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1560 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1561 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1562 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1564 int ibitmap_byte_reg = alloc_preg (cfg);
1566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1568 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1569 int iid_reg = alloc_preg (cfg);
1570 int shifted_iid_reg = alloc_preg (cfg);
1571 int ibitmap_byte_address_reg = alloc_preg (cfg);
1572 int masked_iid_reg = alloc_preg (cfg);
1573 int iid_one_bit_reg = alloc_preg (cfg);
1574 int iid_bit_reg = alloc_preg (cfg);
1575 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1580 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1581 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1582 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the interface id is a compile-time constant, fold it here. */
1584 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1591  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1592  * stored in "klass_reg" implements the interface "klass".
1595 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Delegate to the generic bitmap check using MonoClass's bitmap offset. */
1597 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1601  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1602  * stored in "vtable_reg" implements the interface "klass".
1605 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Same as the class variant, but the bitmap lives inside the MonoVTable. */
1607 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1611  * Emit code which checks whenever the interface id of @klass is smaller than
1612  * than the value given by max_iid_reg.
/*
 * On failure: branch to FALSE_TARGET when provided, otherwise throw
 * InvalidCastException (see the two emission paths below).
 */
1615 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1616 MonoBasicBlock *false_target)
1618 if (cfg->compile_aot) {
/* AOT: the interface id must come from a load-time IID patch. */
1619 int iid_reg = alloc_preg (cfg);
1620 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1621 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1626 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1628 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1631 /* Same as above, but obtains max_iid from a vtable */
1633 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1634 MonoBasicBlock *false_target)
1636 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id, then run the shared iid range check. */
1638 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1639 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1642 /* Same as above, but obtains max_iid from a klass */
1644 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1645 MonoBasicBlock *false_target)
1647 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id, then run the shared iid range check. */
1649 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1650 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype test: walk the supertype table of the
 *   class in KLASS_REG and branch to TRUE_TARGET when the entry at
 *   KLASS's depth equals KLASS (given either as KLASS_INST, an AOT
 *   classconst, or an immediate).  Branch to FALSE_TARGET when the
 *   candidate's inheritance depth is too small.
 */
1654 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1656 int idepth_reg = alloc_preg (cfg);
1657 int stypes_reg = alloc_preg (cfg);
1658 int stype = alloc_preg (cfg);
1660 mono_class_setup_supertypes (klass);
/* Only emit the depth check when it cannot be guaranteed statically. */
1662 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [idepth - 1] and compare against the target class. */
1667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1671 } else if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with the class given only statically. */
1682 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1684 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the MonoVTable in VTABLE_REG:
 *   range-check the interface id, test the interface bitmap bit, then
 *   either branch to TRUE_TARGET or throw InvalidCastException.
 */
1688 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1690 int intf_reg = alloc_preg (cfg);
1692 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1693 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1698 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1702  * Variant of the above that takes a register to the class, not the vtable.
1705 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as the vtable variant, reading the MonoClass bitmap. */
1709 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check against KLASS_REG, throwing
 *   InvalidCastException on mismatch.  The expected class comes from
 *   KLASS_INST (dynamic), an AOT classconst, or an immediate pointer.
 */
1719 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1723 } else if (cfg->compile_aot) {
1724 int const_reg = alloc_preg (cfg);
1725 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1726 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1730 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a statically-known class. */
1734 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1736 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 *   (instead of throwing, unlike mini_emit_class_check ()).
 */
1740 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1742 if (cfg->compile_aot) {
1743 int const_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1745 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst () below recurses through it. */
1753 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in KLASS_REG against KLASS,
 *   throwing InvalidCastException on failure.  Arrays are handled by
 *   checking rank and then recursively checking the element class;
 *   non-arrays walk the supertype table (same scheme as the isinst path).
 */
1756 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1759 int rank_reg = alloc_preg (cfg);
1760 int eclass_reg = alloc_preg (cfg);
1762 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1765 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1766 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: special-case object/enum casts, interfaces,
 * and fall back to a recursive castclass for everything else. */
1768 if (klass->cast_class == mono_defaults.object_class) {
1769 int parent_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1771 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1772 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1773 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1774 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1775 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1776 } else if (klass->cast_class == mono_defaults.enum_class) {
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1779 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1781 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1782 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* An SZARRAY cast additionally requires the object to have no bounds. */
1785 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1786 /* Check that the object is a vector too */
1787 int bounds_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1790 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertype-table walk, throwing instead of branching. */
1793 int idepth_reg = alloc_preg (cfg);
1794 int stypes_reg = alloc_preg (cfg);
1795 int stype = alloc_preg (cfg);
1797 mono_class_setup_supertypes (klass);
1799 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1802 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1806 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with the class given only statically. */
1811 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1813 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that sets SIZE bytes at DESTREG + OFFSET to VAL
 *   (currently only VAL == 0 is supported, see the assert).  Uses
 *   immediate stores for small aligned sizes, otherwise loops with
 *   register stores of decreasing width.
 */
1817 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1821 g_assert (val == 0);
/* Fast path: a single immediate store when size fits one register. */
1826 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1829 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1832 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1837 #if SIZEOF_REGISTER == 8
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register and store it piecewise. */
1845 val_reg = alloc_preg (cfg);
1847 if (SIZEOF_REGISTER == 8)
1848 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1850 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte-at-a-time stores. */
1853 /* This could be optimized further if neccesary */
1855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1862 #if !NO_UNALIGNED_ACCESS
1863 if (SIZEOF_REGISTER == 8) {
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Drain the remaining tail with progressively narrower stores. */
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR that copies SIZE bytes from SRCREG + SOFFSET to
 *   DESTREG + DOFFSET, using load/store pairs of the widest width the
 *   alignment and platform allow.
 */
1895 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1902 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1903 g_assert (size < 10000);
/* Unaligned source/destination: copy byte by byte. */
1906 /* This could be optimized further if neccesary */
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned: use 8-byte copies on 64-bit targets first... */
1917 #if !NO_UNALIGNED_ACCESS
1918 if (SIZEOF_REGISTER == 8) {
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...then drain the tail with 4-, 2- and 1-byte copies. */
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1939 cur_reg = alloc_preg (cfg);
1940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1947 cur_reg = alloc_preg (cfg);
1948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1949 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR that stores SREG1 into the TLS slot identified by TLS_KEY.
 *   Under AOT the slot offset is only known at load time, so it is read
 *   from a patched constant; the JIT path uses the offset directly.
 */
1957 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1961 if (cfg->compile_aot) {
1962 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1963 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1965 ins->sreg2 = c->dreg;
1966 MONO_ADD_INS (cfg->cbb, ins);
1968 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1970 ins->inst_offset = mini_get_tls_offset (tls_key);
1971 MONO_ADD_INS (cfg->cbb, ins);
1978  * Emit IR to push the current LMF onto the LMF stack.
1981 emit_push_lmf (MonoCompile *cfg)
1984 * Emit IR to push the LMF:
1985 * lmf_addr = <lmf_addr from tls>
1986 * lmf->lmf_addr = lmf_addr
1987 * lmf->prev_lmf = *lmf_addr
1990 int lmf_reg, prev_lmf_reg;
1991 MonoInst *ins, *lmf_ins;
/* Fast path: the current LMF is reachable directly through TLS. */
1996 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1997 /* Load current lmf */
1998 lmf_ins = mono_get_lmf_intrinsic (cfg);
2000 MONO_ADD_INS (cfg->cbb, lmf_ins);
2001 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2002 lmf_reg = ins->dreg;
2003 /* Save previous_lmf */
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Publish the new LMF as the TLS current. */
2006 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2009 * Store lmf_addr in a variable, so it can be allocated to a global register.
2011 if (!cfg->lmf_addr_var)
2012 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several alternative ways to obtain lmf_addr follow, depending on which
 * intrinsics/TLS support the target provides. */
2015 ins = mono_get_jit_tls_intrinsic (cfg);
2017 int jit_tls_dreg = ins->dreg;
2019 MONO_ADD_INS (cfg->cbb, ins);
2020 lmf_reg = alloc_preg (cfg);
2021 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2023 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2028 MONO_ADD_INS (cfg->cbb, lmf_ins);
2031 MonoInst *args [16], *jit_tls_ins, *ins;
2033 /* Inline mono_get_lmf_addr () */
2034 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2036 /* Load mono_jit_tls_id */
2037 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2038 /* call pthread_getspecific () */
2039 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2040 /* lmf_addr = &jit_tls->lmf */
2041 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2048 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link the new LMF into the list: lmf->previous_lmf = *lmf_addr;
 * *lmf_addr = lmf. */
2050 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2051 lmf_reg = ins->dreg;
2053 prev_lmf_reg = alloc_preg (cfg);
2054 /* Save previous_lmf */
2055 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2056 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2058 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2065  * Emit IR to pop the current LMF from the LMF stack.
2068 emit_pop_lmf (MonoCompile *cfg)
2070 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2076 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2077 lmf_reg = ins->dreg;
/* Fast path: restore the saved previous_lmf straight into TLS. */
2079 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2080 /* Load previous_lmf */
2081 prev_lmf_reg = alloc_preg (cfg);
2082 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2084 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2087 * Emit IR to pop the LMF:
2088 * *(lmf->lmf_addr) = lmf->prev_lmf
2090 /* This could be called before emit_push_lmf () */
2091 if (!cfg->lmf_addr_var)
2092 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2093 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Unlink: *lmf_addr = lmf->previous_lmf. */
2095 prev_lmf_reg = alloc_preg (cfg);
2096 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave callback (FUNC) taking the current method
 *   as its only argument, when enter/leave profiling is enabled.
 */
2102 emit_instrumentation_call (MonoCompile *cfg, void *func)
2104 MonoInst *iargs [1];
2107 * Avoid instrumenting inlined methods since it can
2108 * distort profiling results.
2110 if (cfg->method != cfg->current_method)
2113 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2114 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2115 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the IR call opcode for a call returning TYPE, further
 *   specialized by CALLI (indirect via register) and VIRT (membase /
 *   vtable dispatch).  NOTE(review): several case labels are elided in
 *   this dump.
 */
2120 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2123 type = mini_get_underlying_type (cfg, type);
2124 switch (type->type) {
2125 case MONO_TYPE_VOID:
2126 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2133 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2137 case MONO_TYPE_FNPTR:
2138 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2139 case MONO_TYPE_CLASS:
2140 case MONO_TYPE_STRING:
2141 case MONO_TYPE_OBJECT:
2142 case MONO_TYPE_SZARRAY:
2143 case MONO_TYPE_ARRAY:
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2147 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2150 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2152 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2154 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2155 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
2156 if (type->data.klass->enumtype) {
2157 type = mono_class_enum_basetype (type->data.klass);
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2161 case MONO_TYPE_TYPEDBYREF:
2162 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2163 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2164 type = &type->data.generic_class->container_class->byval_arg;
2167 case MONO_TYPE_MVAR:
2169 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2171 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2177  * target_type_is_incompatible:
2178  * @cfg: MonoCompile context
2180  * Check that the item @arg on the evaluation stack can be stored
2181  * in the target type (can be a local, or field, etc).
2182  * The cfg arg can be used to check if we need verification or just
2185  * Returns: non-0 value if arg can't be stored on a target.
2188 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2190 MonoType *simple_type;
/* By-ref targets only accept managed pointers or native ints. */
2193 if (target->byref) {
2194 /* FIXME: check that the pointed to types match */
2195 if (arg->type == STACK_MP)
2196 return arg->klass != mono_class_from_mono_type (target);
2197 if (arg->type == STACK_PTR)
2202 simple_type = mini_get_underlying_type (cfg, target);
2203 switch (simple_type->type) {
2204 case MONO_TYPE_VOID:
2212 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2216 /* STACK_MP is needed when setting pinned locals */
2217 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2222 case MONO_TYPE_FNPTR:
2224 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2225 * in native int. (#688008).
2227 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2230 case MONO_TYPE_CLASS:
2231 case MONO_TYPE_STRING:
2232 case MONO_TYPE_OBJECT:
2233 case MONO_TYPE_SZARRAY:
2234 case MONO_TYPE_ARRAY:
2235 if (arg->type != STACK_OBJ)
2237 /* FIXME: check type compatibility */
2241 if (arg->type != STACK_I8)
2245 if (arg->type != cfg->r4_stack_type)
2249 if (arg->type != STACK_R8)
/* Value types must match both stack kind and exact class. */
2252 case MONO_TYPE_VALUETYPE:
2253 if (arg->type != STACK_VTYPE)
2255 klass = mono_class_from_mono_type (simple_type);
2256 if (klass != arg->klass)
2259 case MONO_TYPE_TYPEDBYREF:
2260 if (arg->type != STACK_VTYPE)
2262 klass = mono_class_from_mono_type (simple_type);
2263 if (klass != arg->klass)
2266 case MONO_TYPE_GENERICINST:
2267 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2270 klass = mono_class_from_mono_type (simple_type);
2271 /* The second cases is needed when doing partial sharing */
2272 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2276 if (arg->type != STACK_OBJ)
2278 /* FIXME: check type compatibility */
/* Generic type variables: vtype-constrained vars need STACK_VTYPE,
 * reference-constrained ones need STACK_OBJ. */
2282 case MONO_TYPE_MVAR:
2283 g_assert (cfg->generic_sharing_context);
2284 if (mini_type_var_is_vt (cfg, simple_type)) {
2285 if (arg->type != STACK_VTYPE)
2288 if (arg->type != STACK_OBJ)
2293 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2299  * Prepare arguments for passing to a function call.
2300  * Return a non-zero value if the arguments can't be passed to the given
2302  * The type checks are not yet complete and some conversions may need
2303  * casts on 32 or 64 bit architectures.
2305  * FIXME: implement this using target_type_is_incompatible ()
2308 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2310 MonoType *simple_type;
/* The 'this' argument (when present) must be an object or pointer. */
2314 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2318 for (i = 0; i < sig->param_count; ++i) {
2319 if (sig->params [i]->byref) {
2320 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2324 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
/* NOTE(review): several case labels are elided in this dump. */
2326 switch (simple_type->type) {
2327 case MONO_TYPE_VOID:
2336 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2342 case MONO_TYPE_FNPTR:
2343 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2346 case MONO_TYPE_CLASS:
2347 case MONO_TYPE_STRING:
2348 case MONO_TYPE_OBJECT:
2349 case MONO_TYPE_SZARRAY:
2350 case MONO_TYPE_ARRAY:
2351 if (args [i]->type != STACK_OBJ)
2356 if (args [i]->type != STACK_I8)
2360 if (args [i]->type != cfg->r4_stack_type)
2364 if (args [i]->type != STACK_R8)
2367 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2368 if (simple_type->data.klass->enumtype) {
2369 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2372 if (args [i]->type != STACK_VTYPE)
2375 case MONO_TYPE_TYPEDBYREF:
2376 if (args [i]->type != STACK_VTYPE)
2379 case MONO_TYPE_GENERICINST:
2380 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2383 case MONO_TYPE_MVAR:
2385 if (args [i]->type != STACK_VTYPE)
2389 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual) opcode to its direct-call counterpart.
 *   NOTE(review): the returned direct opcodes are on lines elided in
 *   this dump.
 */
2397 callvirt_to_call (int opcode)
2400 case OP_CALL_MEMBASE:
2402 case OP_VOIDCALL_MEMBASE:
2404 case OP_FCALL_MEMBASE:
2406 case OP_RCALL_MEMBASE:
2408 case OP_VCALL_MEMBASE:
2410 case OP_LCALL_MEMBASE:
2413 g_assert_not_reached ();
2419 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT (interface method table) argument for CALL in a
 *   register: either copy IMT_ARG, or load METHOD as an AOT methodconst /
 *   immediate pointer.  LLVM and JIT backends attach the register to the
 *   call differently (imt_arg_reg vs. MONO_ARCH_IMT_REG).
 */
2421 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM path: record the register in the call instruction itself. */
2425 if (COMPILE_LLVM (cfg)) {
2426 method_reg = alloc_preg (cfg);
2429 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2430 } else if (cfg->compile_aot) {
2431 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2434 MONO_INST_NEW (cfg, ins, OP_PCONST);
2435 ins->inst_p0 = method;
2436 ins->dreg = method_reg;
2437 MONO_ADD_INS (cfg->cbb, ins);
2441 call->imt_arg_reg = method_reg;
2443 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant materialization, bound to the arch's
 * dedicated IMT register. */
2447 method_reg = alloc_preg (cfg);
2450 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2451 } else if (cfg->compile_aot) {
2452 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2455 MONO_INST_NEW (cfg, ins, OP_PCONST);
2456 ins->inst_p0 = method;
2457 ins->dreg = method_reg;
2458 MONO_ADD_INS (cfg->cbb, ins);
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2464 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch record from MP and fill in its target.
 * NOTE(review): lines setting ji->type/ji->ip and the return are not visible
 * in this view.
 */
2465 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2467 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2471 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Return the generic-sharing context usage of KLASS; only meaningful when
 * the method is compiled with a generic sharing context (presumably returns
 * 0 otherwise — the fallback return is not visible in this view).
 */
2477 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2479 if (cfg->generic_sharing_context)
2480 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Method counterpart of mini_class_check_context_used (): returns the
 * generic-sharing context usage of METHOD when sharing is enabled
 * (fallback return not visible in this view).
 */
2486 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2488 if (cfg->generic_sharing_context)
2489 return mono_method_check_context_used (method);
2495 * check_method_sharing:
2497 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2500 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2502 gboolean pass_vtable = FALSE;
2503 gboolean pass_mrgctx = FALSE;
/* A vtable is only needed for static or valuetype methods of generic classes */
2505 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2506 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2507 gboolean sharable = FALSE;
2509 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2512 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2513 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2514 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2516 sharable = sharing_enabled && context_sharable;
2520 * Pass vtable iff target method might
2521 * be shared, which means that sharing
2522 * is enabled for its class and its
2523 * context is sharable (and it's not a
2526 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) get an mrgctx instead of a vtable */
2530 if (mini_method_get_context (cmethod) &&
2531 mini_method_get_context (cmethod)->method_inst) {
2532 g_assert (!pass_vtable);
2534 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2537 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2538 MonoGenericContext *context = mini_method_get_context (cmethod);
2539 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2541 if (sharing_enabled && context_sharable)
2543 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* Report results through the optional out parameters */
2548 if (out_pass_vtable)
2549 *out_pass_vtable = pass_vtable;
2550 if (out_pass_mrgctx)
2551 *out_pass_mrgctx = pass_mrgctx;
2554 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS, choose the call opcode (tailcall,
 * calli, virtual, ...), set up the return value (vret var / temp for vtypes,
 * fresh dreg otherwise), apply the soft-float r8->r4 argument conversion
 * where needed, and lower the arguments via the backend
 * (mono_llvm_emit_call / mono_arch_emit_call).  The caller adds the
 * instruction to a basic block.
 */
2555 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2556 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2560 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the frame, so emit the method-leave profiler event now */
2565 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2567 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2569 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2572 call->signature = sig;
2573 call->rgctx_reg = rgctx;
2574 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2576 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns: either reuse cfg->vret_addr or allocate a temp and
 * pass its address via OP_OUTARG_VTRETADDR (see comment below). */
2579 if (mini_type_is_vtype (cfg, sig_ret)) {
2580 call->vret_var = cfg->vret_addr;
2581 //g_assert_not_reached ();
2583 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2584 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2587 temp->backend.is_pinvoke = sig->pinvoke;
2590 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2591 * address of return value to increase optimization opportunities.
2592 * Before vtype decomposition, the dreg of the call ins itself represents the
2593 * fact the call modifies the return value. After decomposition, the call will
2594 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2595 * will be transformed into an LDADDR.
2597 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2598 loada->dreg = alloc_preg (cfg);
2599 loada->inst_p0 = temp;
2600 /* We reference the call too since call->dreg could change during optimization */
2601 loada->inst_p1 = call;
2602 MONO_ADD_INS (cfg->cbb, loada);
2604 call->inst.dreg = temp->dreg;
2606 call->vret_var = loada;
2607 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2608 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2610 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2611 if (COMPILE_SOFT_FLOAT (cfg)) {
2613 * If the call has a float argument, we would need to do an r8->r4 conversion using
2614 * an icall, but that cannot be done during the call sequence since it would clobber
2615 * the call registers + the stack. So we do it before emitting the call.
2617 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2619 MonoInst *in = call->args [i];
2621 if (i >= sig->hasthis)
2622 t = sig->params [i - sig->hasthis];
2624 t = &mono_defaults.int_class->byval_arg;
2625 t = mono_type_get_underlying_type (t);
2627 if (!t->byref && t->type == MONO_TYPE_R4) {
2628 MonoInst *iargs [1];
2632 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2634 /* The result will be in an int vreg */
2635 call->args [i] = conv;
2641 call->need_unbox_trampoline = unbox_trampoline;
2644 if (COMPILE_LLVM (cfg))
2645 mono_llvm_emit_call (cfg, call);
2647 mono_arch_emit_call (cfg, call);
2649 mono_arch_emit_call (cfg, call);
/* Track outgoing-argument area size and that this method makes calls */
2652 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2653 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument to CALL: pass RGCTX_REG in the dedicated
 * MONO_ARCH_RGCTX_REG when the architecture has one, recording the usage on
 * the cfg and the call; otherwise just remember the register in
 * call->rgctx_arg_reg.
 */
2659 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2661 #ifdef MONO_ARCH_RGCTX_REG
2662 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2663 cfg->uses_rgctx_reg = TRUE;
2664 call->rgctx_reg = TRUE;
2666 call->rgctx_arg_reg = rgctx_reg;
2673 inline static MonoInst*
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with optional IMT and rgctx
 * arguments.  For pinvoke wrappers compiled with check_pinvoke_callconv,
 * also emits an SP save/compare around the call and throws
 * ExecutionEngineException on stack imbalance (calling-convention
 * mismatch), restoring SP first so the throw itself is safe.
 */
2674 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2679 gboolean check_sp = FALSE;
2681 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2682 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2684 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own register before the call sequence */
2689 rgctx_reg = mono_alloc_preg (cfg);
2690 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Save the pre-call SP into a dedicated local */
2694 if (!cfg->stack_inbalance_var)
2695 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2697 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2698 ins->dreg = cfg->stack_inbalance_var->dreg;
2699 MONO_ADD_INS (cfg->cbb, ins);
2702 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2704 call->inst.sreg1 = addr->dreg;
2707 emit_imt_argument (cfg, call, NULL, imt_arg);
2709 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare post-call SP against the saved value */
2714 sp_reg = mono_alloc_preg (cfg);
2716 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2718 MONO_ADD_INS (cfg->cbb, ins);
2720 /* Restore the stack so we don't crash when throwing the exception */
2721 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2722 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2723 MONO_ADD_INS (cfg->cbb, ins);
2725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2730 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2732 return (MonoInst*)call;
2736 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2739 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2741 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Central helper for emitting a managed method call.  Handles:
 * string ctor signature fixup, transparent-proxy (remoting) dispatch,
 * delegate Invoke fast path through delegate->invoke_impl, devirtualization
 * of non-virtual/sealed methods, and IMT- or vtable-based virtual dispatch.
 * THIS being non-NULL selects virtual dispatch.  Returns the call
 * instruction.
 */
2744 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2745 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2747 #ifndef DISABLE_REMOTING
2748 gboolean might_be_remote = FALSE;
2750 gboolean virtual = this != NULL;
2751 gboolean enable_for_aot = TRUE;
2755 gboolean need_unbox_trampoline;
2758 sig = mono_method_signature (method);
2761 rgctx_reg = mono_alloc_preg (cfg);
2762 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2765 if (method->string_ctor) {
2766 /* Create the real signature */
2767 /* FIXME: Cache these */
2768 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2769 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2774 context_used = mini_method_check_context_used (cfg, method);
2776 #ifndef DISABLE_REMOTING
/* The receiver may be a transparent proxy: route through the remoting
 * invoke-with-check wrapper (via an rgctx fetch when gshared). */
2777 might_be_remote = this && sig->hasthis &&
2778 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2779 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2781 if (might_be_remote && context_used) {
2784 g_assert (cfg->generic_sharing_context);
2786 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2788 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2792 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2794 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2796 #ifndef DISABLE_REMOTING
2797 if (might_be_remote)
2798 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2801 call->method = method;
2802 call->inst.flags |= MONO_INST_HAS_METHOD;
2803 call->inst.inst_left = this;
2804 call->tail_call = tail;
2807 int vtable_reg, slot_reg, this_reg;
2810 this_reg = this->dreg;
/* Fast path: MulticastDelegate.Invoke calls jump through invoke_impl */
2812 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2813 MonoInst *dummy_use;
2815 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2817 /* Make a call to delegate->invoke_impl */
2818 call->inst.inst_basereg = this_reg;
2819 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2820 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2822 /* We must emit a dummy use here because the delegate trampoline will
2823 replace the 'this' argument with the delegate target making this activation
2824 no longer a root for the delegate.
2825 This is an issue for delegates that target collectible code such as dynamic
2826 methods of GC'able assemblies.
2828 For a test case look into #667921.
2830 FIXME: a dummy use is not the best way to do it as the local register allocator
2831 will put it on a caller save register and spil it around the call.
2832 Ideally, we would either put it on a callee save register or only do the store part.
2834 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2836 return (MonoInst*)call;
2839 if ((!cfg->compile_aot || enable_for_aot) &&
2840 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2841 (MONO_METHOD_IS_FINAL (method) &&
2842 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2843 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2845 * the method is not virtual, we just need to ensure this is not null
2846 * and then we can call the method directly.
2848 #ifndef DISABLE_REMOTING
2849 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2851 * The check above ensures method is not gshared, this is needed since
2852 * gshared methods can't have wrappers.
2854 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2858 if (!method->string_ctor)
2859 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2861 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2862 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2864 * the method is virtual, but we can statically dispatch since either
2865 * it's class or the method itself are sealed.
2866 * But first we need to ensure it's not a null reference.
2868 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2870 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and pick an IMT or vtable slot */
2872 vtable_reg = alloc_preg (cfg);
2873 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2874 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2875 guint32 imt_slot = mono_method_get_imt_slot (method);
2876 emit_imt_argument (cfg, call, call->method, imt_arg);
2877 slot_reg = vtable_reg;
/* IMT slots live at negative offsets from the vtable pointer */
2878 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2880 slot_reg = vtable_reg;
2881 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2882 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2884 g_assert (mono_method_signature (method)->generic_param_count);
2885 emit_imt_argument (cfg, call, call->method, imt_arg);
2889 call->inst.sreg1 = slot_reg;
2890 call->inst.inst_offset = offset;
2891 call->virtual = TRUE;
2895 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2898 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2900 return (MonoInst*)call;
/* Convenience wrapper: non-tail call with no imt/rgctx arguments. */
2904 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2906 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the line storing FUNC into the call (presumably
 * call->fptr = func) is not visible in this view.
 */
2910 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2917 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2920 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2922 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Look up the JIT icall registered for FUNC and call it through its
 * wrapper (which handles exception propagation from native code).
 */
2926 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2928 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2932 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2936 * mono_emit_abs_call:
2938 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2940 inline static MonoInst*
2941 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2942 MonoMethodSignature *sig, MonoInst **args)
2944 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2948 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the patch resolver can find it later */
2951 if (cfg->abs_patches == NULL)
2952 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2953 g_hash_table_insert (cfg->abs_patches, ji, ji);
2954 ins = mono_emit_native_call (cfg, ji, sig, args);
2955 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without a wrapper).  Disabled
 * under LLVM, when single-stepping/seq-points are active, or when the cfg
 * explicitly disables them.  (The TRUE/FALSE return lines are not visible
 * in this view.)
 */
2960 direct_icalls_enabled (MonoCompile *cfg)
2962 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2964 if (cfg->compile_llvm)
2967 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a JIT icall, inlining its managed wrapper when the icall cannot
 * raise (info->no_raise) and direct icalls are enabled; otherwise fall back
 * to calling through the regular wrapper.
 */
2973 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
2976 * Call the jit icall without a wrapper if possible.
2977 * The wrapper is needed for the following reasons:
2978 * - to handle exceptions thrown using mono_raise_exceptions () from the
2979 * icall function. The EH code needs the lmf frame pushed by the
2980 * wrapper to be able to unwind back to managed code.
2981 * - to be able to do stack walks for asynchronously suspended
2982 * threads when debugging.
2984 if (info->no_raise && direct_icalls_enabled (cfg)) {
2988 if (!info->wrapper_method) {
/* Lazily create and publish the wrapper; barrier orders the store */
2989 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2990 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2992 mono_memory_barrier ();
2996 * Inline the wrapper method, which is basically a call to the C icall, and
2997 * an exception check.
2999 costs = inline_method (cfg, info->wrapper_method, NULL,
3000 args, NULL, cfg->real_offset, TRUE, out_cbb);
3001 g_assert (costs > 0);
3002 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3006 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend small-integer return values of pinvoke (or LLVM)
 * calls, since native code may leave the upper bits of the register
 * uninitialized.  Returns the (possibly widened) result instruction.
 */
3011 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3013 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3014 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3018 * Native code might return non register sized integers
3019 * without initializing the upper bits.
3021 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3022 case OP_LOADI1_MEMBASE:
3023 widen_op = OP_ICONV_TO_I1;
3025 case OP_LOADU1_MEMBASE:
3026 widen_op = OP_ICONV_TO_U1;
3028 case OP_LOADI2_MEMBASE:
3029 widen_op = OP_ICONV_TO_I2;
3031 case OP_LOADU2_MEMBASE:
3032 widen_op = OP_ICONV_TO_U2;
3038 if (widen_op != -1) {
3039 int dreg = alloc_preg (cfg);
3042 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3043 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the cached corlib String.memcpy (3-arg) helper, resolving it on
 * first use; aborts if corlib does not provide it.
 */
3053 get_memcpy_method (void)
3055 static MonoMethod *memcpy_method = NULL;
3056 if (!memcpy_method) {
3057 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3059 g_error ("Old corlib found. Install a new one");
3061 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) marking which instance fields of KLASS hold GC references, so the
 * caller knows which stores need write barriers.  Static fields are skipped;
 * nested structs with references are recursed into.
 */
3065 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3067 MonoClassField *field;
3068 gpointer iter = NULL;
3070 while ((field = mono_class_get_fields (klass, &iter))) {
3073 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype fields don't include the MonoObject header in their offset */
3075 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3076 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3077 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3078 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3080 MonoClass *field_class = mono_class_from_mono_type (field->type);
3081 if (field_class->has_references)
3082 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR.  Prefers, in
 * order: the architecture's inline card-table barrier opcode, an inline
 * card-table mark sequence, and finally a call to the GC's write-barrier
 * method.  No-op when write barriers are disabled.
 */
3088 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3090 int card_table_shift_bits;
3091 gpointer card_table_mask;
3093 MonoInst *dummy_use;
3094 int nursery_shift_bits;
3095 size_t nursery_size;
3096 gboolean has_card_table_wb = FALSE;
3098 if (!cfg->gen_write_barriers)
3101 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3103 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3105 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3106 has_card_table_wb = TRUE;
3109 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3112 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3113 wbarrier->sreg1 = ptr->dreg;
3114 wbarrier->sreg2 = value->dreg;
3115 MONO_ADD_INS (cfg->cbb, wbarrier);
3116 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* Inline: card = card_table + (ptr >> shift); *card = 1 */
3117 int offset_reg = alloc_preg (cfg);
3118 int card_reg = alloc_preg (cfg);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3122 if (card_table_mask)
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3125 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3126 * IMM's larger than 32bits.
3128 if (cfg->compile_aot) {
3129 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3131 MONO_INST_NEW (cfg, ins, OP_PCONST);
3132 ins->inst_p0 = card_table;
3133 ins->dreg = card_reg;
3134 MONO_ADD_INS (cfg->cbb, ins);
3137 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3138 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3140 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3141 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator */
3144 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 * KLASS (dest/src addresses in iargs[0]/iargs[1]).  Small copies are
 * unrolled pointer-by-pointer with a barrier after each reference slot;
 * larger copies fall back to the mono_gc_wbarrier_value_copy_bitmap icall.
 * Bails out (the early returns are not visible in this view) when alignment
 * is below pointer size or the size exceeds the 32-slot bitmap limit.
 */
3148 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3150 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3151 unsigned need_wb = 0;
3156 /*types with references can't have alignment smaller than sizeof(void*) */
3157 if (align < SIZEOF_VOID_P)
3160 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3161 if (size > 32 * SIZEOF_VOID_P)
3164 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3166 /* We don't unroll more than 5 stores to avoid code bloat. */
3167 if (size > 5 * SIZEOF_VOID_P) {
3168 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3169 size += (SIZEOF_VOID_P - 1);
3170 size &= ~(SIZEOF_VOID_P - 1);
3172 EMIT_NEW_ICONST (cfg, iargs [2], size);
3173 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3174 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3178 destreg = iargs [0]->dreg;
3179 srcreg = iargs [1]->dreg;
3182 dest_ptr_reg = alloc_preg (cfg);
3183 tmp_reg = alloc_preg (cfg);
3186 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop, barrier per reference slot */
3188 while (size >= SIZEOF_VOID_P) {
3189 MonoInst *load_inst;
3190 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3191 load_inst->dreg = tmp_reg;
3192 load_inst->inst_basereg = srcreg;
3193 load_inst->inst_offset = offset;
3194 MONO_ADD_INS (cfg->cbb, load_inst);
3196 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3199 emit_write_barrier (cfg, iargs [0], load_inst);
3201 offset += SIZEOF_VOID_P;
3202 size -= SIZEOF_VOID_P;
3205 /*tmp += sizeof (void*)*/
3206 if (size >= SIZEOF_VOID_P) {
3207 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3208 MONO_ADD_INS (cfg->cbb, iargs [0]);
3212 /* Those cannot be references since size < sizeof (void*) */
3214 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3215 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3221 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3228 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3229 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3238 * Emit code to copy a valuetype of type @klass whose address is stored in
3239 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Chooses between: wb-aware unrolled copy, mono_value_copy /
 * mono_gsharedvt_value_copy icalls (when barriers are needed), an inline
 * memcpy for small types, and the corlib memcpy helper otherwise.
 * NATIVE selects native (marshalled) layout sizes. */
3242 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3244 MonoInst *iargs [4];
3247 MonoMethod *memcpy_method;
3248 MonoInst *size_ins = NULL;
3249 MonoInst *memcpy_ins = NULL;
3252 if (cfg->generic_sharing_context)
3253 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3256 * This check breaks with spilled vars... need to handle it during verification anyway.
3257 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper must be fetched at runtime */
3260 if (mini_is_gsharedvt_klass (cfg, klass)) {
3262 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3263 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3267 n = mono_class_native_size (klass, &align);
3269 n = mono_class_value_size (klass, &align);
3271 /* if native is true there should be no references in the struct */
3272 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3273 /* Avoid barriers when storing to the stack */
3274 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3275 (dest->opcode == OP_LDADDR))) {
3281 context_used = mini_class_check_context_used (cfg, klass);
3283 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3284 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3286 } else if (context_used) {
3287 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3289 if (cfg->compile_aot) {
3290 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3292 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3293 mono_class_compute_gc_descriptor (klass);
3298 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3300 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small sizes, helper call otherwise */
3305 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3306 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3307 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3312 iargs [2] = size_ins;
3314 EMIT_NEW_ICONST (cfg, iargs [2], n);
3316 memcpy_method = get_memcpy_method ();
3318 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3320 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the cached corlib String.memset (3-arg) helper, resolving it on
 * first use; aborts if corlib does not provide it.
 */
3325 get_memset_method (void)
3327 static MonoMethod *memset_method = NULL;
3328 if (!memset_method) {
3329 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3331 g_error ("Old corlib found. Install a new one");
3333 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at DEST.  Under
 * gsharedvt the size and a bzero helper are fetched from the rgctx at
 * runtime; otherwise a small inline memset is used, falling back to the
 * corlib memset helper for larger types.
 */
3337 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3339 MonoInst *iargs [3];
3342 MonoMethod *memset_method;
3343 MonoInst *size_ins = NULL;
3344 MonoInst *bzero_ins = NULL;
3345 static MonoMethod *bzero_method;
3347 /* FIXME: Optimize this for the case when dest is an LDADDR */
3348 mono_class_init (klass);
3349 if (mini_is_gsharedvt_klass (cfg, klass)) {
3350 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3351 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3353 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3354 g_assert (bzero_method);
3356 iargs [1] = size_ins;
3357 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3361 n = mono_class_value_size (klass, &align);
/* Small types: inline memset; larger ones go through the corlib helper */
3363 if (n <= sizeof (gpointer) * 8) {
3364 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3367 memset_method = get_memset_method ();
3369 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3370 EMIT_NEW_ICONST (cfg, iargs [2], n);
3371 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  Depending
 * on CONTEXT_USED and the method kind this is: the mrgctx variable (generic
 * methods), the vtable variable (static/valuetype methods, possibly loading
 * the vtable out of the mrgctx), or the vtable loaded from the 'this'
 * argument.
 */
3376 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3378 MonoInst *this = NULL;
3380 g_assert (cfg->generic_sharing_context);
/* Only load 'this' for instance methods of reference types that don't
 * use a method-level context */
3382 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3383 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3384 !method->klass->valuetype)
3385 EMIT_NEW_ARGLOAD (cfg, this, 0);
3387 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3388 MonoInst *mrgctx_loc, *mrgctx_var;
3391 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3393 mrgctx_loc = mono_get_vtable_var (cfg);
3394 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3397 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3398 MonoInst *vtable_loc, *vtable_var;
3402 vtable_loc = mono_get_vtable_var (cfg);
3403 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3405 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3406 MonoInst *mrgctx_var = vtable_var;
/* The variable holds an mrgctx; load its class_vtable field */
3409 vtable_reg = alloc_preg (cfg);
3410 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3411 vtable_var->type = STACK_PTR;
/* Default: load the vtable out of 'this' */
3419 vtable_reg = alloc_preg (cfg);
3420 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3425 static MonoJumpInfoRgctxEntry *
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate an rgctx-entry patch descriptor from MP, embedding a
 * MonoJumpInfo of PATCH_TYPE/PATCH_DATA and the requested INFO_TYPE.
 * (The return statement is not visible in this view.)
 */
3426 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3428 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3429 res->method = method;
3430 res->in_mrgctx = in_mrgctx;
3431 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3432 res->data->type = patch_type;
3433 res->data->data.target = patch_data;
3434 res->info_type = info_type;
3439 static inline MonoInst*
/* Emit a call to the lazy rgctx-fetch trampoline to resolve ENTRY at
 * runtime, given the rgctx instruction RGCTX. */
3440 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3442 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to fetch RGCTX_TYPE data about KLASS from the runtime generic
 * context of the current method. */
3446 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3447 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3449 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3450 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3452 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch RGCTX_TYPE data about signature SIG from the rgctx. */
3456 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3457 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3459 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3460 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3462 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch RGCTX_TYPE data for a gsharedvt call described by
 * SIG + CMETHOD from the rgctx. */
3466 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3467 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3469 MonoJumpInfoGSharedVtCall *call_info;
3470 MonoJumpInfoRgctxEntry *entry;
3473 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3474 call_info->sig = sig;
3475 call_info->method = cmethod;
3477 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3478 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3480 return emit_rgctx_fetch (cfg, rgctx, entry);
3484 * emit_get_rgctx_virt_method:
3486 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3489 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3490 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3492 MonoJumpInfoVirtMethod *info;
3493 MonoJumpInfoRgctxEntry *entry;
3496 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3497 info->klass = klass;
3498 info->method = virt_method;
3500 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3501 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3503 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the gsharedvt info structure for CMETHOD from the
 * rgctx (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO). */
3507 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3508 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3510 MonoJumpInfoRgctxEntry *entry;
3513 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3514 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3516 return emit_rgctx_fetch (cfg, rgctx, entry);
3520 * emit_get_rgctx_method:
3522 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3523 * normal constants, else emit a load from the rgctx.
3526 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3527 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3529 if (!context_used) {
3532 switch (rgctx_type) {
3533 case MONO_RGCTX_INFO_METHOD:
3534 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3536 case MONO_RGCTX_INFO_METHOD_RGCTX:
3537 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3540 g_assert_not_reached ();
/* Shared code: resolve through the rgctx fetch trampoline instead */
3543 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3544 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3546 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch RGCTX_TYPE data about FIELD from the rgctx. */
3551 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3552 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3554 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3555 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3557 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of an entry in cfg->gsharedvt_info->entries describing
 * (RGCTX_TYPE, DATA).  An existing matching entry is reused, except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries, which always get a fresh slot (see the
 * extra condition on the match below).  Otherwise a new entry is appended,
 * growing the entries array as needed.
 */
3561 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3563 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3564 MonoRuntimeGenericContextInfoTemplate *template;
/* Search for an existing entry describing the same (info_type, data) pair. */
3569 for (i = 0; i < info->num_entries; ++i) {
3570 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3572 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array: double it (starting at 16).  The old array is
 * mempool-allocated, so it is simply abandoned into the mempool. */
3576 if (info->num_entries == info->count_entries) {
3577 MonoRuntimeGenericContextInfoTemplate *new_entries;
3578 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3580 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3582 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3583 info->entries = new_entries;
3584 info->count_entries = new_count_entries;
/* Append the new entry at the end and return its index. */
3587 idx = info->num_entries;
3588 template = &info->entries [idx];
3589 template->info_type = rgctx_type;
3590 template->data = data;
3592 info->num_entries ++;
3598 * emit_get_gsharedvt_info:
3600 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3603 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or create) the slot for (data, rgctx_type) in the per-method info. */
3608 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3609 /* Load info->entries [idx] */
3610 dreg = alloc_preg (cfg);
/* The gsharedvt info var points to a MonoGSharedVtMethodRuntimeInfo; the
 * entries array is embedded at a fixed offset, so a single load suffices. */
3611 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper around emit_get_gsharedvt_info () keyed on a class,
 * using the class' byval type as the data pointer.
 */
3617 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3619 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable (loaded from the rgctx when the class is generic-sharing dependent,
 * otherwise as a constant).
 */
3623 * On return the caller must check @klass for load errors.
3626 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3628 MonoInst *vtable_arg;
3632 context_used = mini_class_check_context_used (cfg, klass);
/* Shared case: the vtable is only known at run time, fetch it from the rgctx. */
3635 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3636 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: the vtable can be embedded as a constant. */
3638 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3642 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM passes the vtable as a normal argument; the JIT backend passes it in
 * MONO_ARCH_VTABLE_REG instead, which the trampoline expects. */
3645 if (COMPILE_LLVM (cfg))
3646 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3648 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3649 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3650 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (for the debugger) at IL offset IP, but only when
 * sequence points are enabled and METHOD is the method actually being
 * compiled (i.e. not an inlined callee).
 */
3654 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3658 if (cfg->gen_seq_points && cfg->method == method) {
3659 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Record that the IL stack is not empty at this point. */
3661 ins->flags |= MONO_INST_NONEMPTY_STACK;
3662 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, store the source class (read from OBJ_REG's
 * vtable) and the target KLASS into the thread's MonoJitTlsData
 * (class_cast_from/class_cast_to), so a later cast failure can produce a
 * detailed message.  A null object skips the bookkeeping via is_null_bb.
 * If OUT_BBLOCK is non-NULL it receives the current bblock on return.
 */
3667 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3669 if (mini_get_debug_options ()->better_cast_details) {
3670 int vtable_reg = alloc_preg (cfg);
3671 int klass_reg = alloc_preg (cfg);
3672 MonoBasicBlock *is_null_bb = NULL;
3674 int to_klass_reg, context_used;
/* NOTE(review): the null_check parameter presumably gates this null test;
 * the guarding lines are not visible in this chunk — confirm in full source. */
3677 NEW_BBLOCK (cfg, is_null_bb);
3679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* The details live in jit TLS; bail out if the TLS intrinsic is unavailable. */
3683 tls_get = mono_get_jit_tls_intrinsic (cfg);
3685 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3689 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to = klass; loaded from the rgctx when shared, else a constant. */
3695 context_used = mini_class_check_context_used (cfg, klass);
3697 MonoInst *class_ins;
3699 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3700 to_klass_reg = class_ins->dreg;
3702 to_klass_reg = alloc_preg (cfg);
3703 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3708 MONO_START_BB (cfg, is_null_bb);
3710 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Undo save_cast_details () after a cast succeeded: clear the saved source
 * class in jit TLS so stale details are not reported by a later failure.
 */
3716 reset_cast_details (MonoCompile *cfg)
3718 /* Reset the variables holding the cast details */
3719 if (mini_get_debug_options ()->better_cast_details) {
3720 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3722 MONO_ADD_INS (cfg->cbb, tls_get);
3723 /* It is enough to reset the from field */
3724 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that OBJ is an instance of ARRAY_CLASS (by comparing
 * its vtable, or its class under MONO_OPT_SHARED) and throws
 * ArrayTypeMismatchException otherwise.  Used for stelem-style type checks.
 */
3729 * On return the caller must check @array_class for load errors
3732 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3734 int vtable_reg = alloc_preg (cfg);
3737 context_used = mini_class_check_context_used (cfg, array_class);
3739 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also performs the implicit null check on obj. */
3741 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared domains: vtables are per-domain, so compare classes instead. */
3743 if (cfg->opt & MONO_OPT_SHARED) {
3744 int class_reg = alloc_preg (cfg);
3745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3746 if (cfg->compile_aot) {
3747 int klass_reg = alloc_preg (cfg);
3748 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3749 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Gsharing: the expected vtable comes from the rgctx. */
3753 } else if (context_used) {
3754 MonoInst *vtable_ins;
3756 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3757 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Normal case: compare against the (constant) vtable of array_class. */
3759 if (cfg->compile_aot) {
3763 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3765 vt_reg = alloc_preg (cfg);
3766 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3767 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3770 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3776 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3778 reset_cast_details (cfg);
3782 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3783 * generic code is generated.
3786 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing a Nullable<T> is implemented by calling Nullable<T>.Unbox (). */
3788 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3791 MonoInst *rgctx, *addr;
3793 /* FIXME: What if the class is shared? We might not
3794 have to get the address of the method from the
/* Shared case: the concrete Unbox method is only known at run time, so call
 * it indirectly through an address fetched from the rgctx. */
3796 addr = emit_get_rgctx_method (cfg, context_used, method,
3797 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3799 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3801 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, optionally passing the vtable when the
 * target method itself needs it (pass_vtable). */
3803 gboolean pass_vtable, pass_mrgctx;
3804 MonoInst *rgctx_arg = NULL;
3806 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3807 g_assert (!pass_mrgctx);
3810 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3813 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3816 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes SP [0] to a valuetype of class KLASS: verify the
 * object's element class matches KLASS (throwing InvalidCastException
 * otherwise), then compute a pointer past the MonoObject header where the
 * value data lives.
 */
3821 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3825 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3826 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3827 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3828 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3830 obj_reg = sp [0]->dreg;
/* Faulting load performs the null check; then read the vtable's rank. */
3831 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3834 /* FIXME: generics */
3835 g_assert (klass->rank == 0);
/* Boxed valuetypes are never arrays: rank must be 0. */
3838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3839 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3842 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared case: compare against the element class fetched from the rgctx. */
3845 MonoInst *element_class;
3847 /* This assertion is from the unboxcast insn */
3848 g_assert (klass->rank == 0);
3850 element_class = emit_get_rgctx_klass (cfg, context_used,
3851 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3853 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3854 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared case: a direct class check, with cast details for diagnostics. */
3856 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3857 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3858 reset_cast_details (cfg);
/* Result: managed pointer just past the object header. */
3861 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3862 MONO_ADD_INS (cfg->cbb, add);
3863 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete representation is
 * only known at run time.  Branches at run time on the class' box type
 * (fetched via MONO_RGCTX_INFO_CLASS_BOX_TYPE; the compared constants 1/2
 * correspond to the is_ref_bb/is_nullable_bb targets) and produces the
 * address of the unboxed value in all three cases.  On return *OUT_CBB is
 * set to the current bblock.
 */
3870 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3872 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3873 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3877 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Run-time castclass check (unbox flavour) before reading the data. */
3883 args [1] = klass_inst;
3886 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3888 NEW_BBLOCK (cfg, is_ref_bb);
3889 NEW_BBLOCK (cfg, is_nullable_bb);
3890 NEW_BBLOCK (cfg, end_bb);
3891 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3892 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3893 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3895 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3896 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3898 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3899 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the value lives right after the MonoObject header. */
3903 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3904 MONO_ADD_INS (cfg->cbb, addr);
3906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3909 MONO_START_BB (cfg, is_ref_bb);
3911 /* Save the ref to a temporary */
3912 dreg = alloc_ireg (cfg);
3913 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3914 addr->dreg = addr_reg;
3915 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3916 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3919 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the class' unbox helper indirectly through an address
 * from the gsharedvt info, with a signature built by hand. */
3922 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3923 MonoInst *unbox_call;
3924 MonoMethodSignature *unbox_sig;
3926 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3927 unbox_sig->ret = &klass->byval_arg;
3928 unbox_sig->param_count = 1;
3929 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3930 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3932 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3933 addr->dreg = addr_reg;
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3939 MONO_START_BB (cfg, end_bb);
/* All paths join with addr_reg holding the address of the value. */
3942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3944 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an object of class KLASS, choosing between the
 * managed (inline) allocator, a specialized mscorlib helper, and the generic
 * allocation icalls depending on sharing mode, AOT and GC capabilities.
 * FOR_BOX is passed through to the allocator selection.
 */
3950 * Returns NULL and set the cfg exception on error.
3953 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3955 MonoInst *iargs [2];
/* Generic-sharing path: the class/vtable is fetched from the rgctx. */
3961 MonoInst *iargs [2];
3962 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3964 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Under MONO_OPT_SHARED the icall takes a klass, otherwise a vtable. */
3966 if (cfg->opt & MONO_OPT_SHARED)
3967 rgctx_info = MONO_RGCTX_INFO_KLASS;
3969 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3970 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3975 alloc_ftn = mono_object_new;
3978 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's managed allocator when available (not usable with
 * MONO_OPT_SHARED since it needs a vtable, not a klass). */
3981 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3982 if (known_instance_size) {
3983 int size = mono_class_instance_size (klass);
3985 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
3987 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3990 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3993 if (cfg->opt & MONO_OPT_SHARED) {
3994 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3995 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3997 alloc_ftn = mono_object_new;
3998 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3999 /* This happens often in argument checking code, eg. throw new FooException... */
4000 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4001 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4002 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4004 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4005 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: surface a TypeLoadException via the cfg. */
4009 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4010 cfg->exception_ptr = klass;
4014 #ifndef MONO_CROSS_COMPILE
4015 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4018 if (managed_alloc) {
4019 int size = mono_class_instance_size (klass);
4021 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4022 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4023 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4025 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions want the instance size in pointer-sized words
 * (rounded up) as the first argument. */
4027 guint32 lw = vtable->klass->instance_size;
4028 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4029 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4030 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4033 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4037 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL into an object of class KLASS.  Handles three
 * cases: Nullable<T> (call Nullable<T>.Box), gsharedvt classes (run-time
 * branch on the class' box type), and plain valuetypes (allocate + store).
 * On return *OUT_CBB is the current bblock.
 */
4044 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4046 MonoInst *alloc, *ins;
4048 *out_cbb = cfg->cbb;
4050 if (mono_class_is_nullable (klass)) {
4051 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4054 /* FIXME: What if the class is shared? We might not
4055 have to get the method address from the RGCTX. */
/* Shared case: call Box () indirectly through an rgctx-provided address. */
4056 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4057 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4058 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4060 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, optionally passing the vtable. */
4062 gboolean pass_vtable, pass_mrgctx;
4063 MonoInst *rgctx_arg = NULL;
4065 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4066 g_assert (!pass_mrgctx);
4069 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4072 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4075 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Gsharedvt: branch on the run-time box type (same encoding as in
 * handle_unbox_gsharedvt: 1 -> reference, 2 -> nullable, else vtype). */
4079 if (mini_is_gsharedvt_klass (cfg, klass)) {
4080 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4081 MonoInst *res, *is_ref, *src_var, *addr;
4084 dreg = alloc_ireg (cfg);
4086 NEW_BBLOCK (cfg, is_ref_bb);
4087 NEW_BBLOCK (cfg, is_nullable_bb);
4088 NEW_BBLOCK (cfg, end_bb);
4089 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4091 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4093 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4094 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate an object and store the value after the header. */
4097 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4100 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4101 ins->opcode = OP_STOREV_MEMBASE;
4103 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4104 res->type = STACK_OBJ;
4106 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4109 MONO_START_BB (cfg, is_ref_bb);
4111 /* val is a vtype, so has to load the value manually */
4112 src_var = get_vreg_to_inst (cfg, val->dreg);
4114 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4115 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4120 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Box through a hand-built gsharedvt
 * signature, since the concrete method cannot be constructed at JIT time. */
4123 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4124 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4126 MonoMethodSignature *box_sig;
4129 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4130 * construct that method at JIT time, so have to do things by hand.
4132 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4133 box_sig->ret = &mono_defaults.object_class->byval_arg;
4134 box_sig->param_count = 1;
4135 box_sig->params [0] = &klass->byval_arg;
4136 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4137 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4138 res->type = STACK_OBJ;
4142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4144 MONO_START_BB (cfg, end_bb);
4146 *out_cbb = cfg->cbb;
/* Plain valuetype: allocate and store the value past the object header. */
4150 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4154 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic type) with at least one variant/covariant type argument which is a
 * reference type.  Such casts need the slower cache-based cast helpers.
 */
4160 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4163 MonoGenericContainer *container;
4164 MonoGenericInst *ginst;
4166 if (klass->generic_class) {
4167 container = klass->generic_class->container_class->generic_container;
4168 ginst = klass->generic_class->context.class_inst;
4169 } else if (klass->generic_container && context_used) {
4170 container = klass->generic_container;
4171 ginst = container->context.class_inst;
/* Check each type argument whose parameter is marked (co)variant. */
4176 for (i = 0; i < container->type_argc; ++i) {
4178 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4180 type = ginst->type_argv [i];
4181 if (mini_type_is_reference (cfg, type))
/* Lazily-initialized whitelist of corlib class names whose icalls are safe to
 * call directly (see icall_is_direct_callable () below). */
4187 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall may be called directly (without going
 * through a wrapper).  Only whitelisted corlib icalls qualify, since a direct
 * callee must not (transitively) call mono_raise_exception ().
 */
4190 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4192 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4193 if (!direct_icalls_enabled (cfg))
4197 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4198 * Whitelist a few icalls for now.
/* Build the whitelist once; the memory barrier publishes the fully
 * initialized table before the global pointer becomes visible. */
4200 if (!direct_icall_type_hash) {
4201 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4203 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4204 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4205 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4206 mono_memory_barrier ();
4207 direct_icall_type_hash = h;
4210 if (cmethod->klass == mono_defaults.math_class)
4212 /* No locking needed */
4213 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* TRUE when an isinst/castclass against KLASS cannot be compiled as a simple
 * inline vtable/class comparison (interfaces, arrays, nullables,
 * MarshalByRef, sealed classes, open type variables) and needs the slower
 * helper/cache path instead. */
4218 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache wrapper with ARGS, bracketed by
 * save/reset of the cast details for better failure diagnostics.  On return
 * *OUT_BBLOCK is the current bblock.
 */
4221 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4223 MonoMethod *mono_castclass;
4226 mono_castclass = mono_marshal_get_castclass_with_cache ();
4228 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4229 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4230 reset_cast_details (cfg);
4231 *out_bblock = cfg->cbb;
/*
 * get_castclass_cache_idx:
 *
 *   Return a new call-site-unique index for a CASTCLASS_CACHE patch, composed
 * of the method index (high 16 bits) and a per-method counter (low 16 bits).
 */
4237 get_castclass_cache_idx (MonoCompile *cfg)
4239 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4240 cfg->castclass_cache_index ++;
4241 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant of the cache-based castclass: KLASS is a constant, and
 * the per-call-site cache slot is either an AOT CASTCLASS_CACHE patch or a
 * domain-allocated pointer-sized slot.
 */
4245 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4254 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4257 if (cfg->compile_aot) {
4258 idx = get_castclass_cache_idx (cfg);
4259 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4261 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4264 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4266 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure.  Chooses between the cache-based wrapper,
 * an inlined marshal wrapper, and inline vtable/class comparisons depending
 * on the kind of KLASS and whether generic sharing is in effect.  Updates
 * *OUT_BB and *INLINE_COSTS.
 */
4273 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4275 MonoBasicBlock *is_null_bb;
4276 int obj_reg = src->dreg;
4277 int vtable_reg = alloc_preg (cfg);
4279 MonoInst *klass_inst = NULL, *res;
4280 MonoBasicBlock *bblock;
4284 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments require the cache-based wrapper. */
4286 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4287 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4288 (*inline_costs) += 2;
/* Interfaces and MarshalByRef types: inline the marshal castclass wrapper. */
4291 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4292 MonoMethod *mono_castclass;
4293 MonoInst *iargs [1];
4296 mono_castclass = mono_marshal_get_castclass (klass);
4299 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4300 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4301 iargs, ip, cfg->real_offset, TRUE, &bblock);
4302 reset_cast_details (cfg);
4303 CHECK_CFG_EXCEPTION;
4304 g_assert (costs > 0);
4306 cfg->real_offset += 5;
4308 (*inline_costs) += costs;
/* Shared code: complex casts go through the cache wrapper with the cache
 * pointer and klass loaded from the rgctx. */
4317 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4318 MonoInst *cache_ins;
4320 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4325 /* klass - it's the second element of the cache entry*/
4326 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4329 args [2] = cache_ins;
4331 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4334 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline cast: null always succeeds, branch straight to the join block. */
4337 NEW_BBLOCK (cfg, is_null_bb);
4339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4342 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4344 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4346 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4348 int klass_reg = alloc_preg (cfg);
4350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single equality check. */
4352 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4353 /* the remoting code is broken, access the class for now */
4354 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4355 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4358 cfg->exception_ptr = klass;
4361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4364 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4366 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy (handled by the helper). */
4368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4369 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4373 MONO_START_BB (cfg, is_null_bb);
4375 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' opcode: return SRC if it is an instance
 * of KLASS, otherwise NULL.  Complex classes use the isinst-with-cache
 * wrapper; simple classes are tested inline with vtable/class comparisons,
 * with special handling for interfaces, arrays, nullables and sealed classes.
 */
4389 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4392 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4393 int obj_reg = src->dreg;
4394 int vtable_reg = alloc_preg (cfg);
4395 int res_reg = alloc_ireg_ref (cfg);
4396 MonoInst *klass_inst = NULL;
/* Complex cases: use the cache-based wrapper; the cache and klass come from
 * the rgctx when shared. */
4401 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4402 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4403 MonoInst *cache_ins;
4405 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4410 /* klass - it's the second element of the cache entry*/
4411 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4414 args [2] = cache_ins;
4416 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4419 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline test: success paths fall through to is_null_bb (result = src),
 * failure paths go to false_bb (result = NULL). */
4422 NEW_BBLOCK (cfg, is_null_bb);
4423 NEW_BBLOCK (cfg, false_bb);
4424 NEW_BBLOCK (cfg, end_bb);
4426 /* Do the assignment at the beginning, so the other assignment can be if converted */
4427 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4428 ins->type = STACK_OBJ;
/* A null object "is" any type: result stays null. */
4431 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4432 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4436 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4437 g_assert (!context_used);
4438 /* the is_null_bb target simply copies the input register to the output */
4439 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4441 int klass_reg = alloc_preg (cfg);
/* Array case: check rank first, then test the element (cast) class. */
4444 int rank_reg = alloc_preg (cfg);
4445 int eclass_reg = alloc_preg (cfg);
4447 g_assert (!context_used);
4448 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4449 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4450 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element type special cases mirror array covariance rules for enums and
 * System.Object elements. */
4453 if (klass->cast_class == mono_defaults.object_class) {
4454 int parent_reg = alloc_preg (cfg);
4455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4456 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4457 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4459 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4460 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4461 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4463 } else if (klass->cast_class == mono_defaults.enum_class) {
4464 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4465 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4466 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4467 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4469 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4470 /* Check that the object is a vector too */
4471 int bounds_reg = alloc_preg (cfg);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4473 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4477 /* the is_null_bb target simply copies the input register to the output */
4478 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4480 } else if (mono_class_is_nullable (klass)) {
4481 g_assert (!context_used);
4482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4483 /* the is_null_bb target simply copies the input register to the output */
4484 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single class-equality check suffices. */
4486 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4487 g_assert (!context_used);
4488 /* the remoting code is broken, access the class for now */
4489 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4490 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4492 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4493 cfg->exception_ptr = klass;
4496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4501 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk via the helper. */
4504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4505 /* the is_null_bb target simply copies the input register to the output */
4506 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false: result = NULL; null/success: result already holds src. */
4511 MONO_START_BB (cfg, false_bb);
4513 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4516 MONO_START_BB (cfg, is_null_bb);
4518 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the remoting-aware isinst check of SRC against KLASS.
 * The integer result (0/1/2, see the comment below) is left in a fresh
 * int register and surfaced as an OP_ICONST-typed instruction at the end.
 */
4524 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4526 /* This opcode takes as input an object reference and a class, and returns:
4527 0) if the object is an instance of the class,
4528 1) if the object is not instance of the class,
4529 2) if the object is a proxy whose type cannot be determined */
/* With remoting enabled we need extra blocks for the transparent-proxy paths. */
4532 #ifndef DISABLE_REMOTING
4533 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4535 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4537 int obj_reg = src->dreg;
4538 int dreg = alloc_ireg (cfg);
4540 #ifndef DISABLE_REMOTING
4541 int klass_reg = alloc_preg (cfg);
4544 NEW_BBLOCK (cfg, true_bb);
4545 NEW_BBLOCK (cfg, false_bb);
4546 NEW_BBLOCK (cfg, end_bb);
4547 #ifndef DISABLE_REMOTING
4548 NEW_BBLOCK (cfg, false2_bb);
4549 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": go straight to false_bb (result 1). */
4552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4555 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4556 #ifndef DISABLE_REMOTING
4557 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: load the object's vtable and do the interface check. */
4560 tmp_reg = alloc_preg (cfg);
4561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4562 #ifndef DISABLE_REMOTING
4563 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4564 MONO_START_BB (cfg, interface_fail_bb);
4565 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: unless the object is a transparent proxy, it is simply not an instance. */
4567 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4569 tmp_reg = alloc_preg (cfg);
4570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom_type_info: its type cannot be determined here (result 2 via false2_bb). */
4571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4574 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4577 #ifndef DISABLE_REMOTING
/* Non-interface case: fetch the object's class from its vtable. */
4578 tmp_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* For a transparent proxy, test against the remote class's proxy_class instead. */
4582 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4583 tmp_reg = alloc_preg (cfg);
4584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4587 tmp_reg = alloc_preg (cfg);
4588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* No custom_type_info: treat the proxy like a plain object. */
4589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4592 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4595 MONO_START_BB (cfg, no_proxy_bb);
4597 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Remoting disabled: proxy-dependent code must never be reached. */
4599 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0; all join at end_bb. */
4603 MONO_START_BB (cfg, false_bb);
4605 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4608 #ifndef DISABLE_REMOTING
4609 MONO_START_BB (cfg, false2_bb);
4611 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4612 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4615 MONO_START_BB (cfg, true_bb);
4617 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4619 MONO_START_BB (cfg, end_bb);
/* Surface the computed value as an I4 OP_ICONST instruction. */
4622 MONO_INST_NEW (cfg, ins, OP_ICONST);
4624 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the remoting-aware castclass of SRC to KLASS.
 * Unlike handle_cisinst, a failed cast raises InvalidCastException instead of
 * producing a distinct result value; see the result encoding below.
 */
4630 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4632 /* This opcode takes as input an object reference and a class, and returns:
4633 0) if the object is an instance of the class,
4634 1) if the object is a proxy whose type cannot be determined
4635 an InvalidCastException exception is thrown otherwise */
4638 #ifndef DISABLE_REMOTING
4639 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4641 MonoBasicBlock *ok_result_bb;
4643 int obj_reg = src->dreg;
4644 int dreg = alloc_ireg (cfg);
4645 int tmp_reg = alloc_preg (cfg);
4647 #ifndef DISABLE_REMOTING
4648 int klass_reg = alloc_preg (cfg);
4649 NEW_BBLOCK (cfg, end_bb);
4652 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds (result 0). */
4654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details up front so a thrown InvalidCastException can report them. */
4657 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4659 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4660 #ifndef DISABLE_REMOTING
4661 NEW_BBLOCK (cfg, interface_fail_bb);
4663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4664 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4665 MONO_START_BB (cfg, interface_fail_bb);
4666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: anything that is not a transparent proxy throws here. */
4668 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4670 tmp_reg = alloc_preg (cfg);
4671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom_type_info cannot satisfy the cast: throw. */
4672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4673 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy whose type cannot be determined here: result 1. */
4675 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4679 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4683 #ifndef DISABLE_REMOTING
4684 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: use proxy_class for transparent proxies, the object's own class otherwise. */
4686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4688 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4690 tmp_reg = alloc_preg (cfg);
4691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4692 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4694 tmp_reg = alloc_preg (cfg);
4695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4699 NEW_BBLOCK (cfg, fail_1_bb);
4701 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type cannot be determined — result 1, no exception. */
4703 MONO_START_BB (cfg, fail_1_bb);
4705 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4706 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4708 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass throws inside mini_emit_castclass. */
4710 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4712 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4716 MONO_START_BB (cfg, ok_result_bb);
4718 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4720 #ifndef DISABLE_REMOTING
4721 MONO_START_BB (cfg, end_bb);
/* Surface the computed value as an I4 OP_ICONST instruction. */
4725 MONO_INST_NEW (cfg, ins, OP_ICONST);
4727 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 * Emit the inlined IR for Enum.HasFlag: load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result back against ENUM_FLAG —
 * i.e. compute (value & flag) == flag as an I4 boolean.
 */
4732 static G_GNUC_UNUSED MonoInst*
4733 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4735 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4736 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Dispatch on the enum's underlying integral type to pick 32- vs 64-bit opcodes. */
4739 switch (enum_type->type) {
4742 #if SIZEOF_REGISTER == 8
4754 MonoInst *load, *and, *cmp, *ceq;
4755 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4756 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4757 int dest_reg = alloc_ireg (cfg);
4759 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4760 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4761 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4762 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* The comparison result is always a 32-bit boolean regardless of operand width. */
4764 ceq->type = STACK_I4;
/* Decompose the composite opcodes so later passes only see primitive ops. */
4767 load = mono_decompose_opcode (cfg, load, NULL);
4768 and = mono_decompose_opcode (cfg, and, NULL);
4769 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4770 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 * Emit IR that allocates a delegate of type KLASS bound to METHOD with the
 * given TARGET receiver, inlining the work of mono_delegate_ctor ().
 * CONTEXT_USED selects rgctx-based (shared-code) lookups; VIRTUAL marks a
 * delegate over a virtual call.
 * Returns NULL and sets the cfg exception on error.
 */
4780 static G_GNUC_UNUSED MonoInst*
4781 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4785 gpointer trampoline;
4786 MonoInst *obj, *method_ins, *tramp_ins;
4791 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Virtual delegates need an arch-specific invoke impl; bail out if unavailable. */
4794 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4798 obj = handle_alloc (cfg, klass, FALSE, 0);
4802 /* Inline the contents of mono_delegate_ctor */
4804 /* Set target field */
4805 /* Optimize away setting of NULL target */
4806 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store writes a reference into a heap object, so the GC may need a barrier. */
4808 if (cfg->gen_write_barriers) {
4809 dreg = alloc_preg (cfg);
4810 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4811 emit_write_barrier (cfg, ptr, target);
4815 /* Set method field */
4816 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4820 * To avoid looking up the compiled code belonging to the target method
4821 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4822 * store it, and we fill it after the method has been compiled.
4824 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4825 MonoInst *code_slot_ins;
4828 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot mapping under the domain lock. */
4830 domain = mono_domain_get ();
4831 mono_domain_lock (domain);
4832 if (!domain_jit_info (domain)->method_code_hash)
4833 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4834 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4836 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4837 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4839 mono_domain_unlock (domain);
/* Under AOT the slot address must be a patchable constant, not a raw pointer. */
4841 if (cfg->compile_aot)
4842 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4844 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT patch info, or a runtime-created trampoline. */
4849 if (cfg->compile_aot) {
4850 MonoDelegateClassMethodPair *del_tramp;
4852 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4853 del_tramp->klass = klass;
4854 del_tramp->method = context_used ? NULL : method;
4855 del_tramp->virtual = virtual;
4856 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4859 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4861 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4862 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4865 /* Set invoke_impl field */
4867 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: copy invoke_impl and method_ptr out of the MonoDelegateTrampInfo. */
4869 dreg = alloc_preg (cfg);
4870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4873 dreg = alloc_preg (cfg);
4874 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4878 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call to the mono_array_new_va icall for a RANK-dimensional 'newarr',
 * passing the dimension arguments in SP. Requires vararg support, so LLVM
 * codegen is disabled for this method.
 */
4884 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4886 MonoJitICallInfo *info;
4888 /* Need to register the icall so it gets an icall wrapper */
4889 info = mono_get_array_new_va_icall (rank);
4891 cfg->flags |= MONO_CFG_HAS_VARARGS;
4893 /* mono_array_new_va () needs a vararg calling convention */
4894 cfg->disable_llvm = TRUE;
4896 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4897 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4901 * handle_constrained_gsharedvt_call:
4903 * Handle constrained calls where the receiver is a gsharedvt type.
4904 * Return the instruction representing the call. Set the cfg exception on failure.
4907 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4908 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4910 MonoInst *ins = NULL;
4911 MonoBasicBlock *bblock = *ref_bblock;
4912 gboolean emit_widen = *ref_emit_widen;
4915 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4916 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4917 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of shapes is supported; everything else falls through to GSHAREDVT_FAILURE below. */
4919 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4920 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4921 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4922 MonoInst *args [16];
4925 * This case handles calls to
4926 * - object:ToString()/Equals()/GetHashCode(),
4927 * - System.IComparable<T>:CompareTo()
4928 * - System.IEquatable<T>:Equals ()
4929 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, via rgctx when it needs a generic context. */
4933 if (mono_method_check_context_used (cmethod))
4934 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4936 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4937 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4939 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4940 if (fsig->hasthis && fsig->param_count) {
4941 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4942 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4943 ins->dreg = alloc_preg (cfg);
4944 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4945 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus the box-type info in args [3]. */
4948 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4951 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4953 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4954 addr_reg = ins->dreg;
4955 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4957 EMIT_NEW_ICONST (cfg, args [3], 0);
4958 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments to pass: null out the info/array slots. */
4961 EMIT_NEW_ICONST (cfg, args [3], 0);
4962 EMIT_NEW_ICONST (cfg, args [4], 0);
4964 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it according to the return type. */
4967 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4968 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4969 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Unbox manually: the value lives right after the MonoObject header. */
4973 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4974 MONO_ADD_INS (cfg->cbb, add);
4976 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4977 MONO_ADD_INS (cfg->cbb, ins);
4978 /* ins represents the call result */
4981 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Propagate possibly-updated out-parameters back to the caller. */
4984 *ref_emit_widen = emit_widen;
4985 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 * Emit an OP_LOAD_GOTADDR into the GOT variable at the very start of the
 * method (bb_entry), once per compile, and add a dummy use in bb_exit so the
 * variable stays live across the whole method.
 */
4994 mono_emit_load_got_addr (MonoCompile *cfg)
4996 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT var, or if it was already allocated. */
4998 if (!cfg->got_var || cfg->got_var_allocated)
5001 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5002 getaddr->cil_code = cfg->header->code;
5003 getaddr->dreg = cfg->got_var->dreg;
5005 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code, otherwise append normally. */
5006 if (cfg->bb_entry->code) {
5007 getaddr->next = cfg->bb_entry->code;
5008 cfg->bb_entry->code = getaddr;
5011 MONO_ADD_INS (cfg->bb_entry, getaddr);
5013 cfg->got_var_allocated = TRUE;
5016 * Add a dummy use to keep the got_var alive, since real uses might
5017 * only be generated by the back ends.
5018 * Add it to end_bblock, so the variable's lifetime covers the whole
5020 * It would be better to make the usage of the got var explicit in all
5021 * cases when the backend needs it (i.e. calls, throw etc.), so this
5022 * wouldn't be needed.
5024 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5025 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline code-size limit (see MONO_INLINELIMIT handling in
 * mono_method_check_inlining ()); inline_limit_inited guards the one-time setup. */
5028 static int inline_limit;
5029 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Checks compile flags, inline depth, method attributes, code size against
 * the (env-configurable) inline limit, cctor/class-init constraints, and
 * soft-float restrictions.
 */
5032 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5034 MonoMethodHeaderSummary header;
5036 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5037 MonoMethodSignature *sig = mono_method_signature (method);
/* Global switches: inlining disabled, generic sharing in effect, or too deep. */
5041 if (cfg->disable_inline)
5043 if (cfg->generic_sharing_context)
5046 if (cfg->inline_depth > 10)
5049 #ifdef MONO_ARCH_HAVE_LMF_OPS
5050 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5051 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5052 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5057 if (!mono_method_get_header_summary (method, &header))
5060 /*runtime, icall and pinvoke are checked by summary call*/
5061 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5062 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5063 (mono_class_is_marshalbyref (method->klass)) ||
5067 /* also consider num_locals? */
5068 /* Do the size check early to avoid creating vtables */
/* One-time read of MONO_INLINELIMIT; falls back to the compiled-in default. */
5069 if (!inline_limit_inited) {
5070 if (g_getenv ("MONO_INLINELIMIT"))
5071 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5073 inline_limit = INLINE_LENGTH_LIMIT;
5074 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size cap. */
5076 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5080 * if we can initialize the class of the method right away, we do,
5081 * otherwise we don't allow inlining if the class needs initialization,
5082 * since it would mean inserting a call to mono_runtime_class_init()
5083 * inside the inlined code
5085 if (!(cfg->opt & MONO_OPT_SHARED)) {
5086 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5087 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5088 vtable = mono_class_vtable (cfg->domain, method->klass);
5091 if (!cfg->compile_aot)
5092 mono_runtime_class_init (vtable);
5093 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5094 if (cfg->run_cctors && method->klass->has_cctor) {
5095 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5096 if (!method->klass->runtime_info)
5097 /* No vtable created yet */
5099 vtable = mono_class_vtable (cfg->domain, method->klass);
5102 /* This makes so that inline cannot trigger */
5103 /* .cctors: too many apps depend on them */
5104 /* running with a specific order... */
5105 if (! vtable->initialized)
5107 mono_runtime_class_init (vtable);
5109 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5110 if (!method->klass->runtime_info)
5111 /* No vtable created yet */
5113 vtable = mono_class_vtable (cfg->domain, method->klass);
5116 if (!vtable->initialized)
5121 * If we're compiling for shared code
5122 * the cctor will need to be run at aot method load time, for example,
5123 * or at the end of the compilation of the inlining method.
5125 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods with R4 returns or parameters. */
5129 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5130 if (mono_arch_is_soft_float ()) {
5132 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5134 for (i = 0; i < sig->param_count; ++i)
5135 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Per-compile blacklist (e.g. methods that failed to inline before). */
5140 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static field access in METHOD on KLASS requires emitting a
 * class-initialization (cctor) call before the access.
 */
5147 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5149 if (!cfg->compile_aot) {
/* JIT case: if the vtable is already initialized nothing more is needed. */
5151 if (vtable->initialized)
/* BeforeFieldInit allows lazy init, except when compiling the method itself. */
5155 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5156 if (cfg->method == method)
5160 if (!mono_class_needs_cctor_run (klass, method))
5163 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5164 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the 1-dimensional array
 * ARR with element type KLASS: &arr->vector [index * element_size].
 * When BCHECK is set, a bounds check against max_length is emitted first.
 * Returns the address as a STACK_MP instruction.
 */
5171 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5175 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Gsharedvt element types have a runtime-determined size; handled separately below. */
5178 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5181 mono_class_init (klass);
5182 size = mono_class_array_element_size (klass);
5185 mult_reg = alloc_preg (cfg);
5186 array_reg = arr->dreg;
5187 index_reg = index->dreg;
5189 #if SIZEOF_REGISTER == 8
5190 /* The array reg is 64 bits but the index reg is only 32 */
5191 if (COMPILE_LLVM (cfg)) {
5193 index2_reg = index_reg;
5195 index2_reg = alloc_preg (cfg);
5196 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: truncate an I8 index down to I4 if needed. */
5199 if (index->type == STACK_I8) {
5200 index2_reg = alloc_preg (cfg);
5201 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5203 index2_reg = index_reg;
5208 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5210 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 fast path: fold the scale and vector offset into a single LEA. */
5211 if (size == 1 || size == 2 || size == 4 || size == 8) {
5212 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5214 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5215 ins->klass = mono_class_get_element_class (klass);
5216 ins->type = STACK_MP;
/* Generic path: addr = array + index * size + offsetof (vector). */
5222 add_reg = alloc_ireg_mp (cfg);
5225 MonoInst *rgctx_ins;
/* Gsharedvt: the element size comes from the rgctx at runtime. */
5228 g_assert (cfg->generic_sharing_context);
5229 context_used = mini_class_check_context_used (cfg, klass);
5230 g_assert (context_used);
5231 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5232 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5236 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5237 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5238 ins->klass = mono_class_get_element_class (klass);
5239 ins->type = STACK_MP;
5240 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled on targets with native mul/div (depends on OP_LMUL, see caller). */
5245 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the element address for a 2-dimensional array: applies
 * the per-dimension lower bounds, range-checks both indexes against the
 * bounds array, then computes (realidx1 * length2 + realidx2) * size plus the
 * vector offset. Returns the address as a STACK_MP instruction.
 */
5247 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5249 int bounds_reg = alloc_preg (cfg);
5250 int add_reg = alloc_ireg_mp (cfg);
5251 int mult_reg = alloc_preg (cfg);
5252 int mult2_reg = alloc_preg (cfg);
5253 int low1_reg = alloc_preg (cfg);
5254 int low2_reg = alloc_preg (cfg);
5255 int high1_reg = alloc_preg (cfg);
5256 int high2_reg = alloc_preg (cfg);
5257 int realidx1_reg = alloc_preg (cfg);
5258 int realidx2_reg = alloc_preg (cfg);
5259 int sum_reg = alloc_preg (cfg);
5260 int index1, index2, tmpreg;
5264 mono_class_init (klass);
5265 size = mono_class_array_element_size (klass);
5267 index1 = index_ins1->dreg;
5268 index2 = index_ins2->dreg;
5270 #if SIZEOF_REGISTER == 8
5271 /* The array reg is 64 bits but the index reg is only 32 */
5272 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width. */
5275 tmpreg = alloc_preg (cfg);
5276 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5278 tmpreg = alloc_preg (cfg);
5279 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5283 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5287 /* range checking */
5288 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5289 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned-compare against length. */
5291 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5292 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5293 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5294 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5295 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5296 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5297 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same, reading the second MonoArrayBounds entry. */
5299 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5300 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5301 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5303 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5304 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5305 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + offsetof (vector). */
5307 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5308 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5309 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5310 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5311 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5313 ins->type = STACK_MP;
5315 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the element-address computation for an array Get/Set/Address accessor
 * CMETHOD. Rank 1 (and rank 2 when intrinsics are enabled) are expanded
 * inline; other ranks go through the marshal-generated Address () helper.
 */
5322 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5326 MonoMethod *addr_method;
5328 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the trailing value argument is not an index. */
5330 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5333 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5335 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5336 /* emit_ldelema_2 depends on OP_LMUL */
5337 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5338 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5342 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General case: call the runtime-generated address helper for this rank/size. */
5345 element_size = mono_class_array_element_size (eclass);
5346 addr_method = mono_marshal_get_array_address (rank, element_size);
5347 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5352 static MonoBreakPolicy
5353 always_insert_breakpoint (MonoMethod *method)
5355 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5358 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5361 * mono_set_break_policy:
5362 * policy_callback: the new callback function
5364 * Allow embedders to decide whether to actually obey breakpoint instructions
5365 * (both break IL instructions and Debugger.Break () method calls), for example
5366 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5367 * untrusted or semi-trusted code.
5369 * @policy_callback will be called every time a break point instruction needs to
5370 * be inserted with the method argument being the method that calls Debugger.Break()
5371 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5372 * if it wants the breakpoint to not be effective in the given method.
5373 * #MONO_BREAK_POLICY_ALWAYS is the default.
5376 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default (always insert). */
5378 if (policy_callback)
5379 break_policy_func = policy_callback;
5381 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 * Ask the registered break policy whether a breakpoint should be emitted for
 * METHOD. NOTE(review): the name's "brekpoint" typo is long-standing; renaming
 * would touch every caller, so it is kept as-is here.
 */
5385 should_insert_brekpoint (MonoMethod *method) {
5386 switch (break_policy_func (method)) {
5387 case MONO_BREAK_POLICY_ALWAYS:
5389 case MONO_BREAK_POLICY_NEVER:
/* mdb (the old Mono debugger) is gone, so ON_DBG degenerates to a warning. */
5391 case MONO_BREAK_POLICY_ON_DBG:
5392 g_warning ("mdb no longer supported");
5395 g_warning ("Incorrect value returned from break policy callback");
5400 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the Get/SetGenericValueImpl icalls as a direct element load/store:
 * args [0] = array, args [1] = index, args [2] = value (set) or out-location (get).
 */
5402 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5404 MonoInst *addr, *store, *load;
5405 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5407 /* the bounds check is already done by the callers */
5408 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy the value from args [2] into the element, with a write barrier for references. */
5410 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5411 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5412 if (mini_type_is_reference (cfg, fsig->params [2]))
5413 emit_write_barrier (cfg, addr, load);
/* Get: copy the element out into the location given by args [2]. */
5415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5416 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True when KLASS is (or instantiates to) a reference type in this compile. */
5423 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5425 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp [2] into element sp [1] of array sp [0] whose
 * element type is KLASS. With SAFETY_CHECKS, reference-type stores of a
 * possibly-non-null value go through the virtual stelemref helper so the
 * array-covariance type check is performed.
 */
5429 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5431 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5432 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5433 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5434 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5435 MonoInst *iargs [3];
/* The helper is dispatched virtually; its vtable slot must exist. */
5438 mono_class_setup_vtable (obj_array);
5439 g_assert (helper->slot);
5441 if (sp [0]->type != STACK_OBJ)
5443 if (sp [2]->type != STACK_OBJ)
5450 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5454 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5457 // FIXME-VT: OP_ICONST optimization
/* Gsharedvt element: compute the address and do a variable-size store. */
5458 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5459 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5460 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at compile time; still bounds-check. */
5461 } else if (sp [1]->opcode == OP_ICONST) {
5462 int array_reg = sp [0]->dreg;
5463 int index_reg = sp [1]->dreg;
5464 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5467 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5468 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: address computation + store, plus write barrier for references. */
5470 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5471 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5472 if (generic_class_is_reference_type (cfg, klass))
5473 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline Array.UnsafeStore/UnsafeLoad: a store delegates to emit_array_store
 * without safety checks; a load computes the element address (no bounds
 * check) and loads the value.
 */
5480 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element type comes from the value parameter (store) or the return type (load). */
5485 eklass = mono_class_from_mono_type (fsig->params [2]);
5487 eklass = mono_class_from_mono_type (fsig->ret);
5490 return emit_array_store (cfg, eklass, args, FALSE);
5492 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5493 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether a value of PARAM_KLASS may be reinterpreted as RETURN_KLASS
 * by the UnsafeMov intrinsic: both must be valuetypes without references,
 * both structs or both non-structs, neither R4/R8, and of equal value size.
 *
 * Fix: lines 5512/5513 contained mojibake "¶m_klass" — "&param" mangled
 * through an HTML-entity round-trip (&para; == ¶) — restored to
 * "&param_klass" to match the parallel "&return_klass" operands.
 */
5499 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5503 //Only allow for valuetypes
5504 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reference fields would need write barriers; reject them. */
5508 if (param_klass->has_references || return_klass->has_references)
5511 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5512 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5513 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floating-point values live in different registers; a raw move is not valid. */
5516 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5517 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5520 //And have the same size
5521 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 * Inline the UnsafeMov intrinsic when the parameter and return types (or,
 * for rank-1 arrays, their element types) are bit-compatible valuetypes;
 * otherwise fall through to the regular call path.
 */
5527 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5529 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5530 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5532 //Valuetypes that are semantically equivalent
5533 if (is_unsafe_mov_compatible (param_klass, return_klass))
5536 //Arrays of valuetypes that are semantically equivalent
5537 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with intrinsic IR: SIMD intrinsics first
 * (when enabled), then native-types intrinsics.
 */
5544 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5546 #ifdef MONO_ARCH_SIMD_INTRINSICS
5547 MonoInst *ins = NULL;
5549 if (cfg->opt & MONO_OPT_SIMD) {
5550 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5556 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant, stored in
 * backend.memory_barrier_kind) to the current basic block.
 */
5560 emit_memory_barrier (MonoCompile *cfg, int kind)
5562 MonoInst *ins = NULL;
5563 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5564 MONO_ADD_INS (cfg->cbb, ins);
5565 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansions that are only emitted when compiling with the
 * LLVM backend.  System.Math Sin/Cos/Sqrt and Abs(double) become a
 * single unary FP opcode; Min/Max on I4/U4/I8/U8 become IMIN/IMAX/
 * LMIN/LMAX (requires MONO_OPT_CMOV).  Returns the emitted instruction
 * or NULL when no intrinsic applies.
 */
5571 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5573 MonoInst *ins = NULL;
5576 /* The LLVM backend supports these intrinsics */
5577 if (cmethod->klass == mono_defaults.math_class) {
5578 if (strcmp (cmethod->name, "Sin") == 0) {
5580 } else if (strcmp (cmethod->name, "Cos") == 0) {
5582 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5584 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double -> double intrinsic: one opcode, result in a fresh freg. */
5588 if (opcode && fsig->param_count == 1) {
5589 MONO_INST_NEW (cfg, ins, opcode);
5590 ins->type = STACK_R8;
5591 ins->dreg = mono_alloc_freg (cfg);
5592 ins->sreg1 = args [0]->dreg;
5593 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only make sense as branchless opcodes when CMOV is available. */
5597 if (cfg->opt & MONO_OPT_CMOV) {
5598 if (strcmp (cmethod->name, "Min") == 0) {
5599 if (fsig->params [0]->type == MONO_TYPE_I4)
5601 if (fsig->params [0]->type == MONO_TYPE_U4)
5602 opcode = OP_IMIN_UN;
5603 else if (fsig->params [0]->type == MONO_TYPE_I8)
5605 else if (fsig->params [0]->type == MONO_TYPE_U8)
5606 opcode = OP_LMIN_UN;
5607 } else if (strcmp (cmethod->name, "Max") == 0) {
5608 if (fsig->params [0]->type == MONO_TYPE_I4)
5610 if (fsig->params [0]->type == MONO_TYPE_U4)
5611 opcode = OP_IMAX_UN;
5612 else if (fsig->params [0]->type == MONO_TYPE_I8)
5614 else if (fsig->params [0]->type == MONO_TYPE_U8)
5615 opcode = OP_LMAX_UN;
/* Binary int min/max: stack type follows the 32- vs 64-bit parameter. */
5619 if (opcode && fsig->param_count == 2) {
5620 MONO_INST_NEW (cfg, ins, opcode);
5621 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5622 ins->dreg = mono_alloc_ireg (cfg);
5623 ins->sreg1 = args [0]->dreg;
5624 ins->sreg2 = args [1]->dreg;
5625 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsic expansions that remain valid in shared (generic) code:
 * the Array.UnsafeStore / UnsafeLoad / UnsafeMov internal helpers.
 * Returns the emitted instruction or NULL when no intrinsic applies.
 */
5633 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5635 if (cmethod->klass == mono_defaults.array_class) {
5636 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5637 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5638 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5639 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5640 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5641 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR for well-known runtime
 * methods (intrinsics): String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Volatile, Debugger, Environment, Math and the
 * Xamarin ObjCRuntime.Selector class.  Falls back to SIMD / native-types /
 * LLVM / arch-specific intrinsics at the end.  Returns the emitted
 * instruction, or NULL to emit a normal call.
 */
5648 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5650 MonoInst *ins = NULL;
/* Lazily cache the RuntimeHelpers class lookup across invocations. */
5652 static MonoClass *runtime_helpers_class = NULL;
5653 if (! runtime_helpers_class)
5654 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5655 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String: bounds-checked char indexer and length --- */
5657 if (cmethod->klass == mono_defaults.string_class) {
5658 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5659 int dreg = alloc_ireg (cfg);
5660 int index_reg = alloc_preg (cfg);
5661 int add_reg = alloc_preg (cfg);
5663 #if SIZEOF_REGISTER == 8
5664 /* The array reg is 64 bits but the index reg is only 32 */
5665 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5667 index_reg = args [1]->dreg;
5669 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
/* x86/amd64 can fold base + index*2 + offset into a single LEA. */
5671 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5672 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5673 add_reg = ins->dreg;
5674 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Other targets: compute index*2 and add explicitly. */
5677 int mult_reg = alloc_preg (cfg);
5678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5679 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5680 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5681 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5683 type_from_op (cfg, ins, NULL, NULL);
5685 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5686 int dreg = alloc_ireg (cfg);
5687 /* Decompose later to allow more optimizations */
5688 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5689 ins->type = STACK_I4;
5690 ins->flags |= MONO_INST_FAULT;
5691 cfg->cbb->has_array_access = TRUE;
5692 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object: GetType, identity hash, empty .ctor --- */
5697 } else if (cmethod->klass == mono_defaults.object_class) {
5699 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5700 int dreg = alloc_ireg_ref (cfg);
5701 int vt_reg = alloc_preg (cfg);
5702 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5703 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5704 type_from_op (cfg, ins, NULL, NULL);
/* Hash from the object address; only valid when the GC does not move objects. */
5707 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5708 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5709 int dreg = alloc_ireg (cfg);
5710 int t1 = alloc_ireg (cfg);
5712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5713 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5714 ins->type = STACK_I4;
5718 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5719 MONO_INST_NEW (cfg, ins, OP_NOP);
5720 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array: generic element access, GetLength/GetLowerBound(0), rank, length --- */
5724 } else if (cmethod->klass == mono_defaults.array_class) {
5725 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5726 return emit_array_generic_access (cfg, fsig, args, FALSE);
5727 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5728 return emit_array_generic_access (cfg, fsig, args, TRUE);
5730 #ifndef MONO_BIG_ARRAYS
5732 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5735 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5736 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5737 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5738 int dreg = alloc_ireg (cfg);
5739 int bounds_reg = alloc_ireg_mp (cfg);
5740 MonoBasicBlock *end_bb, *szarray_bb;
5741 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5743 NEW_BBLOCK (cfg, end_bb);
5744 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on bounds == NULL to separate szarray and multi-dim handling. */
5746 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5747 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5750 /* Non-szarray case */
5752 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5753 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5755 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5756 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5757 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5758 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) == max_length; GetLowerBound(0) == 0. */
5761 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5762 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5764 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5765 MONO_START_BB (cfg, end_bb);
5767 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5768 ins->type = STACK_I4;
/* Only get_Rank / get_Length remain below; bail early otherwise. */
5774 if (cmethod->name [0] != 'g')
5777 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5778 int dreg = alloc_ireg (cfg);
5779 int vtable_reg = alloc_preg (cfg);
5780 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5781 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5782 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5783 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5784 type_from_op (cfg, ins, NULL, NULL);
5787 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5788 int dreg = alloc_ireg (cfg);
5790 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5791 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5792 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers.get_OffsetToStringData: a compile-time constant --- */
5797 } else if (cmethod->klass == runtime_helpers_class) {
5799 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5800 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread: SpinWait_nop, MemoryBarrier, VolatileRead/Write --- */
5804 } else if (cmethod->klass == mono_defaults.thread_class) {
5805 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5806 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5807 MONO_ADD_INS (cfg->cbb, ins);
5809 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5810 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5811 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
/* VolatileRead: plain load selected by parameter type, followed by an acquire barrier. */
5813 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5815 if (fsig->params [0]->type == MONO_TYPE_I1)
5816 opcode = OP_LOADI1_MEMBASE;
5817 else if (fsig->params [0]->type == MONO_TYPE_U1)
5818 opcode = OP_LOADU1_MEMBASE;
5819 else if (fsig->params [0]->type == MONO_TYPE_I2)
5820 opcode = OP_LOADI2_MEMBASE;
5821 else if (fsig->params [0]->type == MONO_TYPE_U2)
5822 opcode = OP_LOADU2_MEMBASE;
5823 else if (fsig->params [0]->type == MONO_TYPE_I4)
5824 opcode = OP_LOADI4_MEMBASE;
5825 else if (fsig->params [0]->type == MONO_TYPE_U4)
5826 opcode = OP_LOADU4_MEMBASE;
5827 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5828 opcode = OP_LOADI8_MEMBASE;
5829 else if (fsig->params [0]->type == MONO_TYPE_R4)
5830 opcode = OP_LOADR4_MEMBASE;
5831 else if (fsig->params [0]->type == MONO_TYPE_R8)
5832 opcode = OP_LOADR8_MEMBASE;
5833 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5834 opcode = OP_LOAD_MEMBASE;
5837 MONO_INST_NEW (cfg, ins, opcode);
5838 ins->inst_basereg = args [0]->dreg;
5839 ins->inst_offset = 0;
5840 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg bank and stack type from the parameter type. */
5842 switch (fsig->params [0]->type) {
5849 ins->dreg = mono_alloc_ireg (cfg);
5850 ins->type = STACK_I4;
5854 ins->dreg = mono_alloc_lreg (cfg);
5855 ins->type = STACK_I8;
5859 ins->dreg = mono_alloc_ireg (cfg);
5860 #if SIZEOF_REGISTER == 8
5861 ins->type = STACK_I8;
5863 ins->type = STACK_I4;
5868 ins->dreg = mono_alloc_freg (cfg);
5869 ins->type = STACK_R8;
5872 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5873 ins->dreg = mono_alloc_ireg_ref (cfg);
5874 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets. */
5878 if (opcode == OP_LOADI8_MEMBASE)
5879 ins = mono_decompose_opcode (cfg, ins, NULL);
5881 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5885 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
/* VolatileWrite: release barrier, then a plain store selected by parameter type. */
5887 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5889 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5890 opcode = OP_STOREI1_MEMBASE_REG;
5891 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5892 opcode = OP_STOREI2_MEMBASE_REG;
5893 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5894 opcode = OP_STOREI4_MEMBASE_REG;
5895 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5896 opcode = OP_STOREI8_MEMBASE_REG;
5897 else if (fsig->params [0]->type == MONO_TYPE_R4)
5898 opcode = OP_STORER4_MEMBASE_REG;
5899 else if (fsig->params [0]->type == MONO_TYPE_R8)
5900 opcode = OP_STORER8_MEMBASE_REG;
5901 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5902 opcode = OP_STORE_MEMBASE_REG;
5905 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5907 MONO_INST_NEW (cfg, ins, opcode);
5908 ins->sreg1 = args [1]->dreg;
5909 ins->inst_destbasereg = args [0]->dreg;
5910 ins->inst_offset = 0;
5911 MONO_ADD_INS (cfg->cbb, ins);
5913 if (opcode == OP_STOREI8_MEMBASE_REG)
5914 ins = mono_decompose_opcode (cfg, ins, NULL);
/* --- System.Threading.Monitor: fast-path Enter/Exit through arch trampolines --- */
5919 } else if (cmethod->klass == mono_defaults.monitor_class) {
5920 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5921 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5924 if (COMPILE_LLVM (cfg)) {
5926 * Pass the argument normally, the LLVM backend will handle the
5927 * calling convention problems.
5929 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5931 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5932 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5933 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5934 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5937 return (MonoInst*)call;
5938 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(object, ref bool lockTaken) - the v4 variant needs a second fixed register. */
5939 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5942 if (COMPILE_LLVM (cfg)) {
5944 * Pass the argument normally, the LLVM backend will handle the
5945 * calling convention problems.
5947 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5949 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5950 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5951 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5952 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5955 return (MonoInst*)call;
5957 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5960 if (COMPILE_LLVM (cfg)) {
5961 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5963 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5964 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5965 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5966 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5969 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
5972 } else if (cmethod->klass->image == mono_defaults.corlib &&
5973 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5974 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
/* Interlocked.Read(ref long): atomic load where supported, else barriered plain load. */
5977 #if SIZEOF_REGISTER == 8
5978 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5979 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5980 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5981 ins->dreg = mono_alloc_preg (cfg);
5982 ins->sreg1 = args [0]->dreg;
5983 ins->type = STACK_I8;
5984 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5985 MONO_ADD_INS (cfg->cbb, ins);
5989 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5991 /* 64 bit reads are already atomic */
5992 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5993 load_ins->dreg = mono_alloc_preg (cfg);
5994 load_ins->inst_basereg = args [0]->dreg;
5995 load_ins->inst_offset = 0;
5996 load_ins->type = STACK_I8;
5997 MONO_ADD_INS (cfg->cbb, load_ins);
5999 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement are atomic add of +1/-1; Add takes the delta from args [1]. */
6006 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6007 MonoInst *ins_iconst;
6010 if (fsig->params [0]->type == MONO_TYPE_I4) {
6011 opcode = OP_ATOMIC_ADD_I4;
6012 cfg->has_atomic_add_i4 = TRUE;
6014 #if SIZEOF_REGISTER == 8
6015 else if (fsig->params [0]->type == MONO_TYPE_I8)
6016 opcode = OP_ATOMIC_ADD_I8;
6019 if (!mono_arch_opcode_supported (opcode))
6021 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6022 ins_iconst->inst_c0 = 1;
6023 ins_iconst->dreg = mono_alloc_ireg (cfg);
6024 MONO_ADD_INS (cfg->cbb, ins_iconst);
6026 MONO_INST_NEW (cfg, ins, opcode);
6027 ins->dreg = mono_alloc_ireg (cfg);
6028 ins->inst_basereg = args [0]->dreg;
6029 ins->inst_offset = 0;
6030 ins->sreg2 = ins_iconst->dreg;
6031 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6032 MONO_ADD_INS (cfg->cbb, ins);
6034 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6035 MonoInst *ins_iconst;
6038 if (fsig->params [0]->type == MONO_TYPE_I4) {
6039 opcode = OP_ATOMIC_ADD_I4;
6040 cfg->has_atomic_add_i4 = TRUE;
6042 #if SIZEOF_REGISTER == 8
6043 else if (fsig->params [0]->type == MONO_TYPE_I8)
6044 opcode = OP_ATOMIC_ADD_I8;
6047 if (!mono_arch_opcode_supported (opcode))
6049 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6050 ins_iconst->inst_c0 = -1;
6051 ins_iconst->dreg = mono_alloc_ireg (cfg);
6052 MONO_ADD_INS (cfg->cbb, ins_iconst);
6054 MONO_INST_NEW (cfg, ins, opcode);
6055 ins->dreg = mono_alloc_ireg (cfg);
6056 ins->inst_basereg = args [0]->dreg;
6057 ins->inst_offset = 0;
6058 ins->sreg2 = ins_iconst->dreg;
6059 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6060 MONO_ADD_INS (cfg->cbb, ins);
6062 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6065 if (fsig->params [0]->type == MONO_TYPE_I4) {
6066 opcode = OP_ATOMIC_ADD_I4;
6067 cfg->has_atomic_add_i4 = TRUE;
6069 #if SIZEOF_REGISTER == 8
6070 else if (fsig->params [0]->type == MONO_TYPE_I8)
6071 opcode = OP_ATOMIC_ADD_I8;
6074 if (!mono_arch_opcode_supported (opcode))
6076 MONO_INST_NEW (cfg, ins, opcode);
6077 ins->dreg = mono_alloc_ireg (cfg);
6078 ins->inst_basereg = args [0]->dreg;
6079 ins->inst_offset = 0;
6080 ins->sreg2 = args [1]->dreg;
6081 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6082 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Exchange: floats are moved to an int register (f2i) before the
 * atomic op and back (i2f) afterwards, since the atomic opcodes are int-only. */
6085 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6086 MonoInst *f2i = NULL, *i2f;
6087 guint32 opcode, f2i_opcode, i2f_opcode;
6088 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6089 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6091 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6092 fsig->params [0]->type == MONO_TYPE_R4) {
6093 opcode = OP_ATOMIC_EXCHANGE_I4;
6094 f2i_opcode = OP_MOVE_F_TO_I4;
6095 i2f_opcode = OP_MOVE_I4_TO_F;
6096 cfg->has_atomic_exchange_i4 = TRUE;
6098 #if SIZEOF_REGISTER == 8
6100 fsig->params [0]->type == MONO_TYPE_I8 ||
6101 fsig->params [0]->type == MONO_TYPE_R8 ||
6102 fsig->params [0]->type == MONO_TYPE_I) {
6103 opcode = OP_ATOMIC_EXCHANGE_I8;
6104 f2i_opcode = OP_MOVE_F_TO_I8;
6105 i2f_opcode = OP_MOVE_I8_TO_F;
6108 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6109 opcode = OP_ATOMIC_EXCHANGE_I4;
6110 cfg->has_atomic_exchange_i4 = TRUE;
6116 if (!mono_arch_opcode_supported (opcode))
6120 /* TODO: Decompose these opcodes instead of bailing here. */
6121 if (COMPILE_SOFT_FLOAT (cfg))
6124 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6125 f2i->dreg = mono_alloc_ireg (cfg);
6126 f2i->sreg1 = args [1]->dreg;
6127 if (f2i_opcode == OP_MOVE_F_TO_I4)
6128 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6129 MONO_ADD_INS (cfg->cbb, f2i);
6132 MONO_INST_NEW (cfg, ins, opcode);
6133 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6134 ins->inst_basereg = args [0]->dreg;
6135 ins->inst_offset = 0;
6136 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6137 MONO_ADD_INS (cfg->cbb, ins);
6139 switch (fsig->params [0]->type) {
6141 ins->type = STACK_I4;
6144 ins->type = STACK_I8;
6147 #if SIZEOF_REGISTER == 8
6148 ins->type = STACK_I8;
6150 ins->type = STACK_I4;
6155 ins->type = STACK_R8;
6158 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6159 ins->type = STACK_OBJ;
6164 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6165 i2f->dreg = mono_alloc_freg (cfg);
6166 i2f->sreg1 = ins->dreg;
6167 i2f->type = STACK_R8;
6168 if (i2f_opcode == OP_MOVE_I4_TO_F)
6169 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6170 MONO_ADD_INS (cfg->cbb, i2f);
/* Exchanging a reference stores it into the location: needs a write barrier. */
6175 if (cfg->gen_write_barriers && is_ref)
6176 emit_write_barrier (cfg, args [0], args [1]);
/* Interlocked.CompareExchange(location, value, comparand): CAS, with the same
 * float-through-int-register dance as Exchange. */
6178 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6179 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6180 guint32 opcode, f2i_opcode, i2f_opcode;
6181 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6182 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6184 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6185 fsig->params [1]->type == MONO_TYPE_R4) {
6186 opcode = OP_ATOMIC_CAS_I4;
6187 f2i_opcode = OP_MOVE_F_TO_I4;
6188 i2f_opcode = OP_MOVE_I4_TO_F;
6189 cfg->has_atomic_cas_i4 = TRUE;
6191 #if SIZEOF_REGISTER == 8
6193 fsig->params [1]->type == MONO_TYPE_I8 ||
6194 fsig->params [1]->type == MONO_TYPE_R8 ||
6195 fsig->params [1]->type == MONO_TYPE_I) {
6196 opcode = OP_ATOMIC_CAS_I8;
6197 f2i_opcode = OP_MOVE_F_TO_I8;
6198 i2f_opcode = OP_MOVE_I8_TO_F;
6201 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6202 opcode = OP_ATOMIC_CAS_I4;
6203 cfg->has_atomic_cas_i4 = TRUE;
6209 if (!mono_arch_opcode_supported (opcode))
6213 /* TODO: Decompose these opcodes instead of bailing here. */
6214 if (COMPILE_SOFT_FLOAT (cfg))
6217 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6218 f2i_new->dreg = mono_alloc_ireg (cfg);
6219 f2i_new->sreg1 = args [1]->dreg;
6220 if (f2i_opcode == OP_MOVE_F_TO_I4)
6221 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6222 MONO_ADD_INS (cfg->cbb, f2i_new);
6224 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6225 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6226 f2i_cmp->sreg1 = args [2]->dreg;
6227 if (f2i_opcode == OP_MOVE_F_TO_I4)
6228 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6229 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6232 MONO_INST_NEW (cfg, ins, opcode);
6233 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6234 ins->sreg1 = args [0]->dreg;
6235 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6236 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6237 MONO_ADD_INS (cfg->cbb, ins);
6239 switch (fsig->params [1]->type) {
6241 ins->type = STACK_I4;
6244 ins->type = STACK_I8;
6247 #if SIZEOF_REGISTER == 8
6248 ins->type = STACK_I8;
6250 ins->type = STACK_I4;
6255 ins->type = STACK_R8;
6258 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6259 ins->type = STACK_OBJ;
6264 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6265 i2f->dreg = mono_alloc_freg (cfg);
6266 i2f->sreg1 = ins->dreg;
6267 i2f->type = STACK_R8;
6268 if (i2f_opcode == OP_MOVE_I4_TO_F)
6269 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6270 MONO_ADD_INS (cfg->cbb, i2f);
6275 if (cfg->gen_write_barriers && is_ref)
6276 emit_write_barrier (cfg, args [0], args [1]);
/* 4-argument CompareExchange variant: also reports success through args [3]. */
6278 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6279 fsig->params [1]->type == MONO_TYPE_I4) {
6280 MonoInst *cmp, *ceq;
6282 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6285 /* int32 r = CAS (location, value, comparand); */
6286 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6287 ins->dreg = alloc_ireg (cfg);
6288 ins->sreg1 = args [0]->dreg;
6289 ins->sreg2 = args [1]->dreg;
6290 ins->sreg3 = args [2]->dreg;
6291 ins->type = STACK_I4;
6292 MONO_ADD_INS (cfg->cbb, ins);
6294 /* bool result = r == comparand; */
6295 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6296 cmp->sreg1 = ins->dreg;
6297 cmp->sreg2 = args [2]->dreg;
6298 cmp->type = STACK_I4;
6299 MONO_ADD_INS (cfg->cbb, cmp);
6301 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6302 ceq->dreg = alloc_ireg (cfg);
6303 ceq->type = STACK_I4;
6304 MONO_ADD_INS (cfg->cbb, ceq);
6306 /* *success = result; */
6307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6309 cfg->has_atomic_cas_i4 = TRUE;
6311 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6312 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6316 } else if (cmethod->klass->image == mono_defaults.corlib &&
6317 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6318 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6321 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6323 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6324 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6326 if (fsig->params [0]->type == MONO_TYPE_I1)
6327 opcode = OP_ATOMIC_LOAD_I1;
6328 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6329 opcode = OP_ATOMIC_LOAD_U1;
6330 else if (fsig->params [0]->type == MONO_TYPE_I2)
6331 opcode = OP_ATOMIC_LOAD_I2;
6332 else if (fsig->params [0]->type == MONO_TYPE_U2)
6333 opcode = OP_ATOMIC_LOAD_U2;
6334 else if (fsig->params [0]->type == MONO_TYPE_I4)
6335 opcode = OP_ATOMIC_LOAD_I4;
6336 else if (fsig->params [0]->type == MONO_TYPE_U4)
6337 opcode = OP_ATOMIC_LOAD_U4;
6338 else if (fsig->params [0]->type == MONO_TYPE_R4)
6339 opcode = OP_ATOMIC_LOAD_R4;
6340 else if (fsig->params [0]->type == MONO_TYPE_R8)
6341 opcode = OP_ATOMIC_LOAD_R8;
6342 #if SIZEOF_REGISTER == 8
6343 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6344 opcode = OP_ATOMIC_LOAD_I8;
6345 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6346 opcode = OP_ATOMIC_LOAD_U8;
6348 else if (fsig->params [0]->type == MONO_TYPE_I)
6349 opcode = OP_ATOMIC_LOAD_I4;
6350 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6351 opcode = OP_ATOMIC_LOAD_U4;
6355 if (!mono_arch_opcode_supported (opcode))
6358 MONO_INST_NEW (cfg, ins, opcode);
6359 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6360 ins->sreg1 = args [0]->dreg;
6361 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6362 MONO_ADD_INS (cfg->cbb, ins);
6364 switch (fsig->params [0]->type) {
6365 case MONO_TYPE_BOOLEAN:
6372 ins->type = STACK_I4;
6376 ins->type = STACK_I8;
6380 #if SIZEOF_REGISTER == 8
6381 ins->type = STACK_I8;
6383 ins->type = STACK_I4;
6388 ins->type = STACK_R8;
6391 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6392 ins->type = STACK_OBJ;
/* Volatile.Write: release-ordered atomic store, plus a GC write barrier for refs. */
6398 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6400 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6402 if (fsig->params [0]->type == MONO_TYPE_I1)
6403 opcode = OP_ATOMIC_STORE_I1;
6404 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6405 opcode = OP_ATOMIC_STORE_U1;
6406 else if (fsig->params [0]->type == MONO_TYPE_I2)
6407 opcode = OP_ATOMIC_STORE_I2;
6408 else if (fsig->params [0]->type == MONO_TYPE_U2)
6409 opcode = OP_ATOMIC_STORE_U2;
6410 else if (fsig->params [0]->type == MONO_TYPE_I4)
6411 opcode = OP_ATOMIC_STORE_I4;
6412 else if (fsig->params [0]->type == MONO_TYPE_U4)
6413 opcode = OP_ATOMIC_STORE_U4;
6414 else if (fsig->params [0]->type == MONO_TYPE_R4)
6415 opcode = OP_ATOMIC_STORE_R4;
6416 else if (fsig->params [0]->type == MONO_TYPE_R8)
6417 opcode = OP_ATOMIC_STORE_R8;
6418 #if SIZEOF_REGISTER == 8
6419 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6420 opcode = OP_ATOMIC_STORE_I8;
6421 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6422 opcode = OP_ATOMIC_STORE_U8;
6424 else if (fsig->params [0]->type == MONO_TYPE_I)
6425 opcode = OP_ATOMIC_STORE_I4;
6426 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6427 opcode = OP_ATOMIC_STORE_U4;
6431 if (!mono_arch_opcode_supported (opcode))
6434 MONO_INST_NEW (cfg, ins, opcode);
6435 ins->dreg = args [0]->dreg;
6436 ins->sreg1 = args [1]->dreg;
6437 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6438 MONO_ADD_INS (cfg->cbb, ins);
6440 if (cfg->gen_write_barriers && is_ref)
6441 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break --- */
6447 } else if (cmethod->klass->image == mono_defaults.corlib &&
6448 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6449 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6450 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* NOTE(review): "brekpoint" spelling must match the declaration elsewhere in
 * mini; do not "fix" it only at this call site. */
6451 if (should_insert_brekpoint (cfg->method)) {
6452 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6454 MONO_INST_NEW (cfg, ins, OP_NOP);
6455 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: platform compile-time constant --- */
6459 } else if (cmethod->klass->image == mono_defaults.corlib &&
6460 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6461 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6462 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6464 EMIT_NEW_ICONST (cfg, ins, 1);
6466 EMIT_NEW_ICONST (cfg, ins, 0);
6469 } else if (cmethod->klass == mono_defaults.math_class) {
6471 * There is general branchless code for Min/Max, but it does not work for
6473 * http://everything2.com/?node_id=1051618
/* --- ObjCRuntime.Selector.GetHandle: fold a constant-string selector lookup --- */
6475 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6476 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6477 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6478 !strcmp (cmethod->klass->name, "Selector")) ||
6479 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6480 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6481 !strcmp (cmethod->klass->name, "Selector"))
6483 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6484 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6485 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6488 MonoJumpInfoToken *ji;
6491 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from either the GOT entry or the AOT constant. */
6493 if (args [0]->opcode == OP_GOT_ENTRY) {
6494 pi = args [0]->inst_p1;
6495 g_assert (pi->opcode == OP_PATCH_INFO);
6496 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6499 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6500 ji = args [0]->inst_p0;
6503 NULLIFY_INS (args [0]);
6506 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6507 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6508 ins->dreg = mono_alloc_ireg (cfg);
6510 ins->inst_p0 = mono_string_to_utf8 (s);
6511 MONO_ADD_INS (cfg->cbb, ins);
/* --- Fallbacks: SIMD, native types, LLVM-specific, then arch-specific intrinsics --- */
6517 #ifdef MONO_ARCH_SIMD_INTRINSICS
6518 if (cfg->opt & MONO_OPT_SIMD) {
6519 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6525 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6529 if (COMPILE_LLVM (cfg)) {
6530 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6535 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different implementation.  Currently
 * this handles only String.InternalAllocateStr, which is rewritten into a
 * call to the GC's managed string allocator (when allocation profiling is
 * off and the code is not compiled with MONO_OPT_SHARED).
 */
6539 * This entry point could be used later for arbitrary method
6542 inline static MonoInst*
6543 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6544 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6546 if (method->klass == mono_defaults.string_class) {
6547 /* managed string allocation support */
6548 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6549 MonoInst *iargs [2];
6550 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6551 MonoMethod *managed_alloc = NULL;
6553 g_assert (vtable); /* Should not fail since it is System.String */
6554 #ifndef MONO_CROSS_COMPILE
6555 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6559 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6560 iargs [1] = args [0];
6561 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create a local variable for every argument (including
 * the implicit 'this') of SIG and emit a store of the corresponding stack
 * value SP [i] into it, wiring each new var into cfg->args [i].
 */
6568 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6570 MonoInst *store, *temp;
6573 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; take its type from the stack instead. */
6574 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6577 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6578 * would be different than the MonoInst's used to represent arguments, and
6579 * the ldelema implementation can't deal with that.
6580 * Solution: When ldelema is used on an inline argument, create a var for
6581 * it, emit ldelema on that var, and emit the saving code below in
6582 * inline_method () if needed.
6584 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6585 cfg->args [i] = temp;
6586 /* This uses cfg->args [i] which is set by the preceding line */
6587 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6588 store->cil_code = sp [0]->cil_code;
6593 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6594 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only permit inlining of callees whose full name starts
 * with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The env var is read once and cached in a function-static.
 */
check_inline_called_method_name_limit (MonoMethod *called_method)
	static const char *limit = NULL;
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
		if (limit_string != NULL)
			limit = limit_string;
	/* An empty limit means "no restriction" (branch for that case elided). */
	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);
		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid: only permit inlining inside callers whose full name
 * starts with the prefix in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Mirrors check_inline_called_method_name_limit () above.
 */
check_inline_caller_method_name_limit (MonoMethod *caller_method)
	static const char *limit = NULL;
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
	/* An empty limit means "no restriction" (branch for that case elided). */
	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the zero/default value of RTYPE.
 * Dispatches on the underlying type: pointer-like types get a NULL pconst,
 * integer types an iconst/i8const, fp types point at static zero constants,
 * and valuetypes (including vt-constrained type vars) get a VZERO.
 */
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	/* static so the R4CONST/R8CONST instructions can safely point at them */
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;
	rtype = mini_get_underlying_type (cfg, rtype);
	/* reference / pointer types */
	MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* with r4fp, R4 stays in single precision rather than widening to R8 */
		MONO_INST_NEW (cfg, ins, OP_R4CONST);
		ins->type = STACK_R4;
		ins->inst_p0 = (void*)&r4_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
		/* generic type variable known to be a valuetype */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	/* fallback: treat as pointer-sized null */
	MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar (), but emits OP_DUMMY_* inits:
 * placeholder definitions that keep the IR/SSA valid without generating
 * any real initialization code. Falls back to a real init for types with
 * no dummy opcode.
 */
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	rtype = mini_get_underlying_type (cfg, rtype);
	MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	/* no dummy opcode for this type: emit a real init instead */
	emit_init_rvar (cfg, dreg, rtype);
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
	MonoInst *var = cfg->locals [local];
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/* With soft float, init into a scratch vreg and store that to the
		 * local, so the store goes through the soft-float decomposition. */
		int reg = alloc_dreg (cfg, var->type);
		emit_init_rvar (cfg, reg, type);
		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
	/* real initialization (init == TRUE path) */
	emit_init_rvar (cfg, var->dreg, type);
	/* dummy initialization (init == FALSE path) */
	emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Inline CMETHOD at the current IR position by recursively running
 * mono_method_to_ir () on it with the caller's cfg. Returns the cost of
 * inlining CMETHOD (0 / negative means the inline was aborted).
 * The caller's per-method cfg state (locals, args, bblock maps, etc.) is
 * saved in prev_* variables and restored after the recursive call.
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
			   guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* optional name-prefix filters for inline debugging (see above) */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
	fsig = mono_method_signature (cmethod);
	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);
	if (cheader == NULL || mono_loader_get_last_error ()) {
		/* header load failed: propagate only when the inline is mandatory */
		MonoLoaderError *error = mono_loader_get_last_error ();
		mono_metadata_free_mh (cheader);
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);
		mono_loader_clear_error ();
	/* Must verify before creating locals as it can cause the JIT to assert. */
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;
	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;
	/* save the caller's compile state before recursing */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;
	prev_disable_inline = cfg->disable_inline;
	/* a callvirt on a non-static method needs a 'this' null check in the callee */
	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
	ret_var_set = cfg->ret_var_set;
	/* restore the caller's compile state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->disable_inline = prev_disable_inline;
	cfg->inline_depth --;
	/* accept the inline if it was cheap enough, or if it is mandatory */
	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
		cfg->stat_inlined_methods++;
		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);
		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);
		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
		/*
		 * It's possible that the rvar is set in some prev bblock, but not in others.
		 */
		for (i = 0; i < ebblock->in_count; ++i) {
			bb = ebblock->in_bb [i];
			/* unreachable predecessors (e.g. after a throw) never set rvar */
			if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
		*out_cbb = cfg->cbb;
		/*
		 * If the inlined method contains only a throw, then the ret var is not
		 * set, so set it to a dummy value.
		 */
		emit_init_rvar (cfg, rvar->dreg, fsig->ret);
		EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	/* abort path: the inline was too expensive or failed */
	if (cfg->verbose_level > 2)
		printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
	cfg->exception_type = MONO_EXCEPTION_NONE;
	mono_loader_clear_error ();
	/* This gets rid of the newly added bblocks */
	cfg->cbb = prev_cbb;
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6950 * Some of these comments may well be out-of-date.
6951 * Design decisions: we do a single pass over the IL code (and we do bblock
6952 * splitting/merging in the few cases when it's required: a back jump to an IL
6953 * address that was not already seen as bblock starting point).
6954 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6955 * Complex operations are decomposed in simpler ones right away. We need to let the
6956 * arch-specific code peek and poke inside this process somehow (except when the
6957 * optimizations can take advantage of the full semantic info of coarse opcodes).
6958 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6959 * MonoInst->opcode initially is the IL opcode or some simplification of that
6960 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6961 * opcode with value bigger than OP_LAST.
6962 * At this point the IR can be handed over to an interpreter, a dumb code generator
6963 * or to the optimizing code generator that will translate it to SSA form.
6965 * Profiling directed optimizations.
6966 * We may compile by default with few or no optimizations and instrument the code
6967 * or the user may indicate what methods to optimize the most either in a config file
6968 * or through repeated runs where the compiler applies offline the optimizations to
6969 * each method and then decides if it was worth it.
6972 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6973 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6974 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6975 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6976 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6977 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6978 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6979 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6981 /* offset from br.s -> br like opcodes */
6982 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to basic block BB,
 * i.e. no other bblock is registered as starting at that offset.
 */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create basic blocks for every
 * branch target and fall-through point (via GET_BBLOCK). Also marks the
 * bblock containing a CEE_THROW as out-of-line so it can be moved to a
 * cold section.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;
	cli_addr = ip - start;
	i = mono_opcode_value ((const guint8 **)&ip, end);
	opcode = &mono_opcodes [i];
	/* advance IP past the operand; branches additionally register targets */
	switch (opcode->argument) {
	case MonoInlineNone:
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoShortInlineR:
	case MonoShortInlineVar:
	case MonoShortInlineI:
	case MonoShortInlineBrTarget:
		/* 1-byte signed displacement relative to the end of the instruction */
		target = start + cli_addr + 2 + (signed char)ip [1];
		GET_BBLOCK (cfg, bblock, target);
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineBrTarget:
		/* 4-byte signed displacement relative to the end of the instruction */
		target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
		GET_BBLOCK (cfg, bblock, target);
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineSwitch: {
		guint32 n = read32 (ip + 1);
		/* switch targets are relative to the end of the whole jump table */
		cli_addr += 5 + 4 * n;
		target = start + cli_addr;
		GET_BBLOCK (cfg, bblock, target);
		for (j = 0; j < n; ++j) {
			target = start + cli_addr + (gint32)read32 (ip);
			GET_BBLOCK (cfg, bblock, target);
	g_assert_not_reached ();
	if (i == CEE_THROW) {
		unsigned char *bb_start = ip - 1;
		/* Find the start of the bblock containing the throw */
		while ((bb_start >= start) && !bblock) {
			bblock = cfg->cil_offset_to_bb [(bb_start) - start];
		bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in M's image to a MonoMethod, allowing open constructed
 * types. For wrapper methods the token indexes wrapper data instead of
 * metadata, and the result is inflated with CONTEXT if needed.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	if (m->wrapper_type != MONO_WRAPPER_NONE) {
		method = mono_method_get_wrapper_data (m, token);
		method = mono_class_inflate_generic_method_checked (method, context, &error);
		g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
	/* normal metadata lookup */
	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * code, rejects methods on open constructed types (branch elided here).
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass, inflating with CONTEXT. Wrapper methods
 * look up the class in their wrapper data; otherwise a (typespec-aware)
 * metadata lookup is done. The class is initialized before returning.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		klass = mono_method_get_wrapper_data (method, token);
		klass = mono_class_inflate_generic_class (klass, context);
	/* normal metadata lookup */
	klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
	mono_error_cleanup (&error); /* FIXME don't swallow the error */
	mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature (used for calli). Wrapper
 * methods store the signature in their wrapper data and it is inflated
 * with CONTEXT; otherwise the signature is parsed from metadata.
 */
static inline MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
	MonoMethodSignature *fsig;
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
		fsig = mono_inflate_generic_signature (fsig, context, &error);
		g_assert (mono_error_ok (&error));
	/* normal metadata lookup */
	fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * throw_exception:
 *
 *   Return (and cache in a function-static) the
 * SecurityManager::ThrowException(Exception) helper method used to raise
 * CoreCLR security exceptions from JITted code.
 */
throw_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), raising the given
 * preallocated exception object at runtime.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
	MonoMethod *thrower = throw_exception ();
	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
get_original_method (MonoMethod *method)
	if (method->wrapper_type == MONO_WRAPPER_NONE)
	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the security exception at runtime instead of failing the compile.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the security exception at runtime instead of failing the compile.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/*
	 * Recognized pattern (after newarr[System.Int32]):
	 *   dup
	 *   ldtoken field valuetype ...
	 *   call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
		*out_field_token = field_token;
		/* verify the callee really is RuntimeHelpers::InitializeArray */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		/* only element types whose raw bytes are endian-safe are handled */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		/* the blob must be at least as large as the array contents */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!image_is_dynamic (method->klass->image)) {
			/* locate the field's RVA blob in the image */
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
		data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP.
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
	char *method_fname = mono_method_full_name (method, TRUE);
	MonoMethodHeader *header = mono_method_get_header (method);
	if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	/* disassemble the single offending instruction for the message */
	method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
	/* header is freed later along with the rest of the cfg */
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a caller-supplied exception object; the object
 * is registered as a GC root since it lives past this frame.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N, optimizing away the reg-reg move
 * when the value on the stack is a freshly emitted constant.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	/* general case: emit an explicit local store */
	EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases (e.g. ldloca immediately followed by initobj).
 * Returns the IP to resume at when the pattern was matched and folded.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	local = read16 (ip + 2);
	/* ldloca <n>; initobj <type>  ==>  direct zero-init of the local */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		type = mini_get_underlying_type (cfg, &klass->byval_arg);
		emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return whether CLASS derives from System.Exception, walking up the
 * parent chain.
 */
is_exception_class (MonoClass *class)
	if (class == mono_defaults.exception_class)
	class = class->parent;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is cached on the assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value before the inited flag.
 */
is_jit_optimizer_disabled (MonoMethod *m)
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	gboolean val = FALSE;
	/* fast path: cached per-assembly answer */
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;
	klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	/* attribute class not present: optimizer cannot be disabled */
	ass->jit_optimizer_disabled = FALSE;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;
	attrs = mono_custom_attrs_from_assembly (ass);
	for (i = 0; i < attrs->num_attrs; ++i) {
		MonoCustomAttrEntry *attr = &attrs->attrs [i];
		MonoMethodSignature *sig;
		if (!attr->ctor || attr->ctor->klass != klass)
		/* Decode the attribute. See reflection.c */
		p = (const char*)attr->data;
		g_assert (read16 (p) == 0x0001);
		// FIXME: Support named parameters
		sig = mono_method_signature (attr->ctor);
		/* only the (bool, bool) ctor carries the IsJITOptimizerDisabled flag */
		if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
		/* Two boolean arguments */
	mono_custom_attrs_free (attrs);
	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a call from METHOD to CMETHOD with signature FSIG and
 * IL opcode CALL_OPCODE can be compiled as a real tail call. Starts from
 * an arch-specific (or signature-equality) baseline and then vetoes cases
 * where the callee could observe the caller's dying stack frame.
 */
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
	gboolean supported_tail_call;
#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
	/* fallback: require identical signatures and a non-struct return */
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;
	if (call_opcode != CEE_CALL)
		supported_tail_call = FALSE;
	/* Debugging support */
	if (supported_tail_call) {
		if (!mono_debug_count ())
			supported_tail_call = FALSE;
	return supported_tail_call;
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
 */
is_magic_tls_access (MonoClassField *field)
	/* must be the 'tlsdata' field of corlib's ThreadLocal`1 */
	if (strcmp (field->name, "tlsdata"))
	if (strcmp (field->parent->name, "ThreadLocal`1"))
	return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
	int static_data_reg, array_reg, dreg;
	int offset2_reg, idx_reg;
	// inlined access to the tls data (see threads.c)
	static_data_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
	/* low 6 bits of the offset select the static_data chunk... */
	idx_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
	array_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	/* ...and the remaining bits are the byte offset within that chunk */
	offset2_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
	dreg = alloc_ireg (cfg);
	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * create_magic_tls_access:
 *
 *   Redirect access to the tlsdata field to the tls var given by the
 * tls_offset field. The computed address is cached per-method in
 * CACHED_TLS_ADDR (a temp var) so repeated accesses reuse it.
 */
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
	MonoInst *load, *addr, *temp, *store, *thread_ins;
	MonoClassField *offset_field;
	if (*cached_tls_addr) {
		/* fast path: address already computed earlier in this method */
		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
	thread_ins = mono_get_thread_intrinsic (cfg);
	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
	MONO_ADD_INS (cfg->cbb, thread_ins);
	/* no thread intrinsic on this arch: fall back to a managed call */
	MonoMethod *thread_method;
	thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
	thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
	addr->klass = mono_class_from_mono_type (tls_field->type);
	addr->type = STACK_MP;
	/* cache the address in a temp for subsequent accesses */
	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes.
 * Tries, in order: intrinsic ctor expansion, inlining, gsharedvt indirect
 * call, rgctx indirect call, and finally a plain direct call.
 *
 *   REF_BBLOCK will point to the current bblock after the call.
 */
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
				  MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
	MonoBasicBlock *bblock = *ref_bblock;
	/* shared valuetype ctors need an extra rgctx/vtable argument */
	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
			mono_method_is_generic_sharable (cmethod, TRUE)) {
		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
			mono_class_vtable (cfg->domain, cmethod->klass);
			CHECK_TYPELOAD (cmethod->klass);
			vtable_arg = emit_get_rgctx_method (cfg, context_used,
				cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
			cmethod->klass, MONO_RGCTX_INFO_VTABLE);
		/* no context used: the vtable is a compile-time constant */
		MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
		CHECK_TYPELOAD (cmethod->klass);
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	/* Avoid virtual calls to ctors if possible */
	if (mono_class_is_marshalbyref (cmethod->klass))
		callvirt_this_arg = sp [0];
	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
		CHECK_CFG_EXCEPTION;
	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
			mono_method_check_inlining (cfg, cmethod) &&
			!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
			cfg->real_offset += 5;
			/* discount the 5 bytes of the call instruction itself */
			*inline_costs += costs - 5;
			*ref_bblock = bblock;
		INLINE_FAILURE ("inline failure");
		// FIXME-VT: Clean this up
		if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
			GSHAREDVT_FAILURE(*ip);
		mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
		/* gsharedvt: call through the out trampoline */
		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
		mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
	} else if (context_used &&
			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
		MonoInst *cmethod_addr;
		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
		cmethod_addr = emit_get_rgctx_method (cfg, context_used,
			cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
		/* plain direct call */
		INLINE_FAILURE ("ctor call");
		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
										  callvirt_this_arg, NULL, vtable_arg);
7650 * mono_method_to_ir:
7652 * Translate the .net IL into linear IR.
7655 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7656 MonoInst *return_var, MonoInst **inline_args,
7657 guint inline_offset, gboolean is_virtual_call)
7660 MonoInst *ins, **sp, **stack_start;
7661 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7662 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7663 MonoMethod *cmethod, *method_definition;
7664 MonoInst **arg_array;
7665 MonoMethodHeader *header;
7667 guint32 token, ins_flag;
7669 MonoClass *constrained_class = NULL;
7670 unsigned char *ip, *end, *target, *err_pos;
7671 MonoMethodSignature *sig;
7672 MonoGenericContext *generic_context = NULL;
7673 MonoGenericContainer *generic_container = NULL;
7674 MonoType **param_types;
7675 int i, n, start_new_bblock, dreg;
7676 int num_calls = 0, inline_costs = 0;
7677 int breakpoint_id = 0;
7679 GSList *class_inits = NULL;
7680 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7682 gboolean init_locals, seq_points, skip_dead_blocks;
7683 gboolean sym_seq_points = FALSE;
7684 MonoInst *cached_tls_addr = NULL;
7685 MonoDebugMethodInfo *minfo;
7686 MonoBitSet *seq_point_locs = NULL;
7687 MonoBitSet *seq_point_set_locs = NULL;
7689 cfg->disable_inline = is_jit_optimizer_disabled (method);
7691 /* serialization and xdomain stuff may need access to private fields and methods */
7692 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7693 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7694 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7695 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7696 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7697 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7699 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7700 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7701 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7702 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7703 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7705 image = method->klass->image;
7706 header = mono_method_get_header (method);
7708 MonoLoaderError *error;
7710 if ((error = mono_loader_get_last_error ())) {
7711 mono_cfg_set_exception (cfg, error->exception_type);
7713 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7714 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7716 goto exception_exit;
7718 generic_container = mono_method_get_generic_container (method);
7719 sig = mono_method_signature (method);
7720 num_args = sig->hasthis + sig->param_count;
7721 ip = (unsigned char*)header->code;
7722 cfg->cil_start = ip;
7723 end = ip + header->code_size;
7724 cfg->stat_cil_code_size += header->code_size;
7726 seq_points = cfg->gen_seq_points && cfg->method == method;
7728 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7729 /* We could hit a seq point before attaching to the JIT (#8338) */
7733 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7734 minfo = mono_debug_lookup_method (method);
7736 MonoSymSeqPoint *sps;
7737 int i, n_il_offsets;
7739 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7740 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7741 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7742 sym_seq_points = TRUE;
7743 for (i = 0; i < n_il_offsets; ++i) {
7744 if (sps [i].il_offset < header->code_size)
7745 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7748 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7749 /* Methods without line number info like auto-generated property accessors */
7750 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7751 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7752 sym_seq_points = TRUE;
7757 * Methods without init_locals set could cause asserts in various passes
7758 * (#497220). To work around this, we emit dummy initialization opcodes
7759 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7760 * on some platforms.
7762 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7763 init_locals = header->init_locals;
7767 method_definition = method;
7768 while (method_definition->is_inflated) {
7769 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7770 method_definition = imethod->declaring;
7773 /* SkipVerification is not allowed if core-clr is enabled */
7774 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7776 dont_verify_stloc = TRUE;
7779 if (sig->is_inflated)
7780 generic_context = mono_method_get_context (method);
7781 else if (generic_container)
7782 generic_context = &generic_container->context;
7783 cfg->generic_context = generic_context;
7785 if (!cfg->generic_sharing_context)
7786 g_assert (!sig->has_type_parameters);
7788 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7789 g_assert (method->is_inflated);
7790 g_assert (mono_method_get_context (method)->method_inst);
7792 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7793 g_assert (sig->generic_param_count);
7795 if (cfg->method == method) {
7796 cfg->real_offset = 0;
7798 cfg->real_offset = inline_offset;
7801 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7802 cfg->cil_offset_to_bb_len = header->code_size;
7804 cfg->current_method = method;
7806 if (cfg->verbose_level > 2)
7807 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7809 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7811 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7812 for (n = 0; n < sig->param_count; ++n)
7813 param_types [n + sig->hasthis] = sig->params [n];
7814 cfg->arg_types = param_types;
7816 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7817 if (cfg->method == method) {
7819 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7820 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7823 NEW_BBLOCK (cfg, start_bblock);
7824 cfg->bb_entry = start_bblock;
7825 start_bblock->cil_code = NULL;
7826 start_bblock->cil_length = 0;
7829 NEW_BBLOCK (cfg, end_bblock);
7830 cfg->bb_exit = end_bblock;
7831 end_bblock->cil_code = NULL;
7832 end_bblock->cil_length = 0;
7833 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7834 g_assert (cfg->num_bblocks == 2);
7836 arg_array = cfg->args;
7838 if (header->num_clauses) {
7839 cfg->spvars = g_hash_table_new (NULL, NULL);
7840 cfg->exvars = g_hash_table_new (NULL, NULL);
7842 /* handle exception clauses */
7843 for (i = 0; i < header->num_clauses; ++i) {
7844 MonoBasicBlock *try_bb;
7845 MonoExceptionClause *clause = &header->clauses [i];
7846 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7847 try_bb->real_offset = clause->try_offset;
7848 try_bb->try_start = TRUE;
7849 try_bb->region = ((i + 1) << 8) | clause->flags;
7850 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7851 tblock->real_offset = clause->handler_offset;
7852 tblock->flags |= BB_EXCEPTION_HANDLER;
7855 * Linking the try block with the EH block hinders inlining as we won't be able to
7856 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7858 if (COMPILE_LLVM (cfg))
7859 link_bblock (cfg, try_bb, tblock);
7861 if (*(ip + clause->handler_offset) == CEE_POP)
7862 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7864 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7865 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7866 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7867 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7868 MONO_ADD_INS (tblock, ins);
7870 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7871 /* finally clauses already have a seq point */
7872 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7873 MONO_ADD_INS (tblock, ins);
7876 /* todo: is a fault block unsafe to optimize? */
7877 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7878 tblock->flags |= BB_EXCEPTION_UNSAFE;
7881 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7883 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7885 /* catch and filter blocks get the exception object on the stack */
7886 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7887 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7889 /* mostly like handle_stack_args (), but just sets the input args */
7890 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7891 tblock->in_scount = 1;
7892 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7893 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7897 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7898 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7899 if (!cfg->compile_llvm) {
7900 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7901 ins->dreg = tblock->in_stack [0]->dreg;
7902 MONO_ADD_INS (tblock, ins);
7905 MonoInst *dummy_use;
7908 * Add a dummy use for the exvar so its liveness info will be
7911 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7914 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7915 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7916 tblock->flags |= BB_EXCEPTION_HANDLER;
7917 tblock->real_offset = clause->data.filter_offset;
7918 tblock->in_scount = 1;
7919 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7920 /* The filter block shares the exvar with the handler block */
7921 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7922 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7923 MONO_ADD_INS (tblock, ins);
7927 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7928 clause->data.catch_class &&
7929 cfg->generic_sharing_context &&
7930 mono_class_check_context_used (clause->data.catch_class)) {
7932 * In shared generic code with catch
7933 * clauses containing type variables
7934 * the exception handling code has to
7935 * be able to get to the rgctx.
7936 * Therefore we have to make sure that
7937 * the vtable/mrgctx argument (for
7938 * static or generic methods) or the
7939 * "this" argument (for non-static
7940 * methods) are live.
7942 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7943 mini_method_get_context (method)->method_inst ||
7944 method->klass->valuetype) {
7945 mono_get_vtable_var (cfg);
7947 MonoInst *dummy_use;
7949 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7954 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7955 cfg->cbb = start_bblock;
7956 cfg->args = arg_array;
7957 mono_save_args (cfg, sig, inline_args);
7960 /* FIRST CODE BLOCK */
7961 NEW_BBLOCK (cfg, bblock);
7962 bblock->cil_code = ip;
7966 ADD_BBLOCK (cfg, bblock);
7968 if (cfg->method == method) {
7969 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7970 if (breakpoint_id) {
7971 MONO_INST_NEW (cfg, ins, OP_BREAK);
7972 MONO_ADD_INS (bblock, ins);
7976 /* we use a separate basic block for the initialization code */
7977 NEW_BBLOCK (cfg, init_localsbb);
7978 cfg->bb_init = init_localsbb;
7979 init_localsbb->real_offset = cfg->real_offset;
7980 start_bblock->next_bb = init_localsbb;
7981 init_localsbb->next_bb = bblock;
7982 link_bblock (cfg, start_bblock, init_localsbb);
7983 link_bblock (cfg, init_localsbb, bblock);
7985 cfg->cbb = init_localsbb;
7987 if (cfg->gsharedvt && cfg->method == method) {
7988 MonoGSharedVtMethodInfo *info;
7989 MonoInst *var, *locals_var;
7992 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7993 info->method = cfg->method;
7994 info->count_entries = 16;
7995 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7996 cfg->gsharedvt_info = info;
7998 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7999 /* prevent it from being register allocated */
8000 //var->flags |= MONO_INST_VOLATILE;
8001 cfg->gsharedvt_info_var = var;
8003 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8004 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8006 /* Allocate locals */
8007 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8008 /* prevent it from being register allocated */
8009 //locals_var->flags |= MONO_INST_VOLATILE;
8010 cfg->gsharedvt_locals_var = locals_var;
8012 dreg = alloc_ireg (cfg);
8013 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8015 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8016 ins->dreg = locals_var->dreg;
8018 MONO_ADD_INS (cfg->cbb, ins);
8019 cfg->gsharedvt_locals_var_ins = ins;
8021 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8024 ins->flags |= MONO_INST_INIT;
8028 if (mono_security_core_clr_enabled ()) {
8029 /* check if this is native code, e.g. an icall or a p/invoke */
8030 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8031 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8033 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8034 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8036 /* if this is a native call then it can only be JITted from platform code */
8037 if ((icall || pinvk) && method->klass && method->klass->image) {
8038 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8039 MonoException *ex = icall ? mono_get_exception_security () :
8040 mono_get_exception_method_access ();
8041 emit_throw_exception (cfg, ex);
8048 CHECK_CFG_EXCEPTION;
8050 if (header->code_size == 0)
8053 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8058 if (cfg->method == method)
8059 mono_debug_init_method (cfg, bblock, breakpoint_id);
8061 for (n = 0; n < header->num_locals; ++n) {
8062 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8067 /* We force the vtable variable here for all shared methods
8068 for the possibility that they might show up in a stack
8069 trace where their exact instantiation is needed. */
8070 if (cfg->generic_sharing_context && method == cfg->method) {
8071 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8072 mini_method_get_context (method)->method_inst ||
8073 method->klass->valuetype) {
8074 mono_get_vtable_var (cfg);
8076 /* FIXME: Is there a better way to do this?
8077 We need the variable live for the duration
8078 of the whole method. */
8079 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8083 /* add a check for this != NULL to inlined methods */
8084 if (is_virtual_call) {
8087 NEW_ARGLOAD (cfg, arg_ins, 0);
8088 MONO_ADD_INS (cfg->cbb, arg_ins);
8089 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8092 skip_dead_blocks = !dont_verify;
8093 if (skip_dead_blocks) {
8094 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8099 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8100 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8103 start_new_bblock = 0;
8106 if (cfg->method == method)
8107 cfg->real_offset = ip - header->code;
8109 cfg->real_offset = inline_offset;
8114 if (start_new_bblock) {
8115 bblock->cil_length = ip - bblock->cil_code;
8116 if (start_new_bblock == 2) {
8117 g_assert (ip == tblock->cil_code);
8119 GET_BBLOCK (cfg, tblock, ip);
8121 bblock->next_bb = tblock;
8124 start_new_bblock = 0;
8125 for (i = 0; i < bblock->in_scount; ++i) {
8126 if (cfg->verbose_level > 3)
8127 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8128 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8132 g_slist_free (class_inits);
8135 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8136 link_bblock (cfg, bblock, tblock);
8137 if (sp != stack_start) {
8138 handle_stack_args (cfg, stack_start, sp - stack_start);
8140 CHECK_UNVERIFIABLE (cfg);
8142 bblock->next_bb = tblock;
8145 for (i = 0; i < bblock->in_scount; ++i) {
8146 if (cfg->verbose_level > 3)
8147 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8148 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8151 g_slist_free (class_inits);
8156 if (skip_dead_blocks) {
8157 int ip_offset = ip - header->code;
8159 if (ip_offset == bb->end)
8163 int op_size = mono_opcode_size (ip, end);
8164 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8166 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8168 if (ip_offset + op_size == bb->end) {
8169 MONO_INST_NEW (cfg, ins, OP_NOP);
8170 MONO_ADD_INS (bblock, ins);
8171 start_new_bblock = 1;
8179 * Sequence points are points where the debugger can place a breakpoint.
8180 * Currently, we generate these automatically at points where the IL
8183 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8185 * Make methods interruptible at the beginning, and at the targets of
8186 * backward branches.
8187 * Also, do this at the start of every bblock in methods with clauses too,
8188 * to be able to handle instructions with imprecise control flow like
8190 * Backward branches are handled at the end of method-to-ir ().
8192 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8194 /* Avoid sequence points on empty IL like .volatile */
8195 // FIXME: Enable this
8196 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8197 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8198 if (sp != stack_start)
8199 ins->flags |= MONO_INST_NONEMPTY_STACK;
8200 MONO_ADD_INS (cfg->cbb, ins);
8203 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8206 bblock->real_offset = cfg->real_offset;
8208 if ((cfg->method == method) && cfg->coverage_info) {
8209 guint32 cil_offset = ip - header->code;
8210 cfg->coverage_info->data [cil_offset].cil_code = ip;
8212 /* TODO: Use an increment here */
8213 #if defined(TARGET_X86)
8214 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8215 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8217 MONO_ADD_INS (cfg->cbb, ins);
8219 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8220 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8224 if (cfg->verbose_level > 3)
8225 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8229 if (seq_points && !sym_seq_points && sp != stack_start) {
8231 * The C# compiler uses these nops to notify the JIT that it should
8232 * insert seq points.
8234 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8235 MONO_ADD_INS (cfg->cbb, ins);
8237 if (cfg->keep_cil_nops)
8238 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8240 MONO_INST_NEW (cfg, ins, OP_NOP);
8242 MONO_ADD_INS (bblock, ins);
8245 if (should_insert_brekpoint (cfg->method)) {
8246 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8248 MONO_INST_NEW (cfg, ins, OP_NOP);
8251 MONO_ADD_INS (bblock, ins);
8257 CHECK_STACK_OVF (1);
8258 n = (*ip)-CEE_LDARG_0;
8260 EMIT_NEW_ARGLOAD (cfg, ins, n);
8268 CHECK_STACK_OVF (1);
8269 n = (*ip)-CEE_LDLOC_0;
8271 EMIT_NEW_LOCLOAD (cfg, ins, n);
8280 n = (*ip)-CEE_STLOC_0;
8283 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8285 emit_stloc_ir (cfg, sp, header, n);
8292 CHECK_STACK_OVF (1);
8295 EMIT_NEW_ARGLOAD (cfg, ins, n);
8301 CHECK_STACK_OVF (1);
8304 NEW_ARGLOADA (cfg, ins, n);
8305 MONO_ADD_INS (cfg->cbb, ins);
8315 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8317 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8322 CHECK_STACK_OVF (1);
8325 EMIT_NEW_LOCLOAD (cfg, ins, n);
8329 case CEE_LDLOCA_S: {
8330 unsigned char *tmp_ip;
8332 CHECK_STACK_OVF (1);
8333 CHECK_LOCAL (ip [1]);
8335 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8341 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8350 CHECK_LOCAL (ip [1]);
8351 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8353 emit_stloc_ir (cfg, sp, header, ip [1]);
8358 CHECK_STACK_OVF (1);
8359 EMIT_NEW_PCONST (cfg, ins, NULL);
8360 ins->type = STACK_OBJ;
8365 CHECK_STACK_OVF (1);
8366 EMIT_NEW_ICONST (cfg, ins, -1);
8379 CHECK_STACK_OVF (1);
8380 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8386 CHECK_STACK_OVF (1);
8388 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8394 CHECK_STACK_OVF (1);
8395 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8401 CHECK_STACK_OVF (1);
8402 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8403 ins->type = STACK_I8;
8404 ins->dreg = alloc_dreg (cfg, STACK_I8);
8406 ins->inst_l = (gint64)read64 (ip);
8407 MONO_ADD_INS (bblock, ins);
8413 gboolean use_aotconst = FALSE;
8415 #ifdef TARGET_POWERPC
8416 /* FIXME: Clean this up */
8417 if (cfg->compile_aot)
8418 use_aotconst = TRUE;
8421 /* FIXME: we should really allocate this only late in the compilation process */
8422 f = mono_domain_alloc (cfg->domain, sizeof (float));
8424 CHECK_STACK_OVF (1);
8430 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8432 dreg = alloc_freg (cfg);
8433 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8434 ins->type = cfg->r4_stack_type;
8436 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8437 ins->type = cfg->r4_stack_type;
8438 ins->dreg = alloc_dreg (cfg, STACK_R8);
8440 MONO_ADD_INS (bblock, ins);
8450 gboolean use_aotconst = FALSE;
8452 #ifdef TARGET_POWERPC
8453 /* FIXME: Clean this up */
8454 if (cfg->compile_aot)
8455 use_aotconst = TRUE;
8458 /* FIXME: we should really allocate this only late in the compilation process */
8459 d = mono_domain_alloc (cfg->domain, sizeof (double));
8461 CHECK_STACK_OVF (1);
8467 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8469 dreg = alloc_freg (cfg);
8470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8471 ins->type = STACK_R8;
8473 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8474 ins->type = STACK_R8;
8475 ins->dreg = alloc_dreg (cfg, STACK_R8);
8477 MONO_ADD_INS (bblock, ins);
8486 MonoInst *temp, *store;
8488 CHECK_STACK_OVF (1);
8492 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8493 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8495 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8498 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8511 if (sp [0]->type == STACK_R8)
8512 /* we need to pop the value from the x86 FP stack */
8513 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8519 INLINE_FAILURE ("jmp");
8520 GSHAREDVT_FAILURE (*ip);
8523 if (stack_start != sp)
8525 token = read32 (ip + 1);
8526 /* FIXME: check the signature matches */
8527 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8529 if (!cmethod || mono_loader_get_last_error ())
8532 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8533 GENERIC_SHARING_FAILURE (CEE_JMP);
8535 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8537 if (ARCH_HAVE_OP_TAIL_CALL) {
8538 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8541 /* Handle tail calls similarly to calls */
8542 n = fsig->param_count + fsig->hasthis;
8546 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8547 call->method = cmethod;
8548 call->tail_call = TRUE;
8549 call->signature = mono_method_signature (cmethod);
8550 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8551 call->inst.inst_p0 = cmethod;
8552 for (i = 0; i < n; ++i)
8553 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8555 mono_arch_emit_call (cfg, call);
8556 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8557 MONO_ADD_INS (bblock, (MonoInst*)call);
8559 for (i = 0; i < num_args; ++i)
8560 /* Prevent arguments from being optimized away */
8561 arg_array [i]->flags |= MONO_INST_VOLATILE;
8563 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8564 ins = (MonoInst*)call;
8565 ins->inst_p0 = cmethod;
8566 MONO_ADD_INS (bblock, ins);
8570 start_new_bblock = 1;
8575 MonoMethodSignature *fsig;
8578 token = read32 (ip + 1);
8582 //GSHAREDVT_FAILURE (*ip);
8587 fsig = mini_get_signature (method, token, generic_context);
8589 if (method->dynamic && fsig->pinvoke) {
8593 * This is a call through a function pointer using a pinvoke
8594 * signature. Have to create a wrapper and call that instead.
8595 * FIXME: This is very slow, need to create a wrapper at JIT time
8596 * instead based on the signature.
8598 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8599 EMIT_NEW_PCONST (cfg, args [1], fsig);
8601 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8604 n = fsig->param_count + fsig->hasthis;
8608 //g_assert (!virtual || fsig->hasthis);
8612 inline_costs += 10 * num_calls++;
8615 * Making generic calls out of gsharedvt methods.
8616 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8617 * patching gshared method addresses into a gsharedvt method.
8619 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8621 * We pass the address to the gsharedvt trampoline in the rgctx reg
8623 MonoInst *callee = addr;
8625 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8627 GSHAREDVT_FAILURE (*ip);
8629 addr = emit_get_rgctx_sig (cfg, context_used,
8630 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8631 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8635 /* Prevent inlining of methods with indirect calls */
8636 INLINE_FAILURE ("indirect call");
8638 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8643 * Instead of emitting an indirect call, emit a direct call
8644 * with the contents of the aotconst as the patch info.
8646 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8647 info_type = addr->inst_c1;
8648 info_data = addr->inst_p0;
8650 info_type = addr->inst_right->inst_c1;
8651 info_data = addr->inst_right->inst_left;
8654 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8655 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8660 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8664 /* End of call, INS should contain the result of the call, if any */
8666 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8668 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8671 CHECK_CFG_EXCEPTION;
8675 constrained_class = NULL;
8679 case CEE_CALLVIRT: {
8680 MonoInst *addr = NULL;
8681 MonoMethodSignature *fsig = NULL;
8683 int virtual = *ip == CEE_CALLVIRT;
8684 gboolean pass_imt_from_rgctx = FALSE;
8685 MonoInst *imt_arg = NULL;
8686 MonoInst *keep_this_alive = NULL;
8687 gboolean pass_vtable = FALSE;
8688 gboolean pass_mrgctx = FALSE;
8689 MonoInst *vtable_arg = NULL;
8690 gboolean check_this = FALSE;
8691 gboolean supported_tail_call = FALSE;
8692 gboolean tail_call = FALSE;
8693 gboolean need_seq_point = FALSE;
8694 guint32 call_opcode = *ip;
8695 gboolean emit_widen = TRUE;
8696 gboolean push_res = TRUE;
8697 gboolean skip_ret = FALSE;
8698 gboolean delegate_invoke = FALSE;
8699 gboolean direct_icall = FALSE;
8700 gboolean constrained_partial_call = FALSE;
8701 MonoMethod *cil_method;
8704 token = read32 (ip + 1);
8708 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8709 cil_method = cmethod;
8711 if (constrained_class) {
8712 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8713 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8714 g_assert (!cmethod->klass->valuetype);
8715 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8716 constrained_partial_call = TRUE;
8720 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8721 if (cfg->verbose_level > 2)
8722 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8723 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8724 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8725 cfg->generic_sharing_context)) {
8726 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8730 if (cfg->verbose_level > 2)
8731 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8733 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8735 * This is needed since get_method_constrained can't find
8736 * the method in klass representing a type var.
8737 * The type var is guaranteed to be a reference type in this
8740 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8741 g_assert (!cmethod->klass->valuetype);
8743 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8749 if (!cmethod || mono_loader_get_last_error ())
8751 if (!dont_verify && !cfg->skip_visibility) {
8752 MonoMethod *target_method = cil_method;
8753 if (method->is_inflated) {
8754 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8756 if (!mono_method_can_access_method (method_definition, target_method) &&
8757 !mono_method_can_access_method (method, cil_method))
8758 METHOD_ACCESS_FAILURE (method, cil_method);
8761 if (mono_security_core_clr_enabled ())
8762 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8764 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8765 /* MS.NET seems to silently convert this to a callvirt */
8770 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8771 * converts to a callvirt.
8773 * tests/bug-515884.il is an example of this behavior
8775 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8776 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8777 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8781 if (!cmethod->klass->inited)
8782 if (!mono_class_init (cmethod->klass))
8783 TYPE_LOAD_ERROR (cmethod->klass);
8785 fsig = mono_method_signature (cmethod);
8788 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8789 mini_class_is_system_array (cmethod->klass)) {
8790 array_rank = cmethod->klass->rank;
8791 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8792 direct_icall = TRUE;
8793 } else if (fsig->pinvoke) {
8794 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8795 check_for_pending_exc, cfg->compile_aot);
8796 fsig = mono_method_signature (wrapper);
8797 } else if (constrained_class) {
8799 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8803 mono_save_token_info (cfg, image, token, cil_method);
8805 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8806 need_seq_point = TRUE;
8808 /* Don't support calls made using type arguments for now */
8810 if (cfg->gsharedvt) {
8811 if (mini_is_gsharedvt_signature (cfg, fsig))
8812 GSHAREDVT_FAILURE (*ip);
8816 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8817 g_assert_not_reached ();
8819 n = fsig->param_count + fsig->hasthis;
8821 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8824 if (!cfg->generic_sharing_context)
8825 g_assert (!mono_method_check_context_used (cmethod));
8829 //g_assert (!virtual || fsig->hasthis);
8833 if (constrained_class) {
8834 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8835 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8836 /* The 'Own method' case below */
8837 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8838 /* 'The type parameter is instantiated as a reference type' case below. */
8840 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8841 CHECK_CFG_EXCEPTION;
8848 * We have the `constrained.' prefix opcode.
8850 if (constrained_partial_call) {
8851 gboolean need_box = TRUE;
8854 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8855 * called method is not known at compile time either. The called method could end up being
8856 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8857 * to box the receiver.
8858 * A simple solution would be to box always and make a normal virtual call, but that would
8859 * be bad performance wise.
8861 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8863 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8870 MonoBasicBlock *is_ref_bb, *end_bb;
8871 MonoInst *nonbox_call;
8874 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8876 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8877 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8879 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8881 NEW_BBLOCK (cfg, is_ref_bb);
8882 NEW_BBLOCK (cfg, end_bb);
8884 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8885 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8889 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8891 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8894 MONO_START_BB (cfg, is_ref_bb);
8895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8896 ins->klass = constrained_class;
8897 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8898 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8902 MONO_START_BB (cfg, end_bb);
8905 nonbox_call->dreg = ins->dreg;
8907 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8908 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8909 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8912 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8914 * The type parameter is instantiated as a valuetype,
8915 * but that type doesn't override the method we're
8916 * calling, so we need to box `this'.
8918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8919 ins->klass = constrained_class;
8920 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8921 CHECK_CFG_EXCEPTION;
8922 } else if (!constrained_class->valuetype) {
8923 int dreg = alloc_ireg_ref (cfg);
8926 * The type parameter is instantiated as a reference
8927 * type. We have a managed pointer on the stack, so
8928 * we need to dereference it here.
8930 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8931 ins->type = STACK_OBJ;
8934 if (cmethod->klass->valuetype) {
8937 /* Interface method */
8940 mono_class_setup_vtable (constrained_class);
8941 CHECK_TYPELOAD (constrained_class);
8942 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8944 TYPE_LOAD_ERROR (constrained_class);
8945 slot = mono_method_get_vtable_slot (cmethod);
8947 TYPE_LOAD_ERROR (cmethod->klass);
8948 cmethod = constrained_class->vtable [ioffset + slot];
8950 if (cmethod->klass == mono_defaults.enum_class) {
8951 /* Enum implements some interfaces, so treat this as the first case */
8952 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8953 ins->klass = constrained_class;
8954 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8955 CHECK_CFG_EXCEPTION;
8960 constrained_class = NULL;
8963 if (check_call_signature (cfg, fsig, sp))
8966 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8967 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8968 delegate_invoke = TRUE;
8971 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8973 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8974 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8982 * If the callee is a shared method, then its static cctor
8983 * might not get called after the call was patched.
8985 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8986 emit_generic_class_init (cfg, cmethod->klass);
8987 CHECK_TYPELOAD (cmethod->klass);
8990 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8992 if (cfg->generic_sharing_context) {
8993 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8995 context_used = mini_method_check_context_used (cfg, cmethod);
8997 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8998 /* Generic method interface
8999 calls are resolved via a
9000 helper function and don't
9002 if (!cmethod_context || !cmethod_context->method_inst)
9003 pass_imt_from_rgctx = TRUE;
9007 * If a shared method calls another
9008 * shared method then the caller must
9009 * have a generic sharing context
9010 * because the magic trampoline
9011 * requires it. FIXME: We shouldn't
9012 * have to force the vtable/mrgctx
9013 * variable here. Instead there
9014 * should be a flag in the cfg to
9015 * request a generic sharing context.
9018 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9019 mono_get_vtable_var (cfg);
9024 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9026 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9028 CHECK_TYPELOAD (cmethod->klass);
9029 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9034 g_assert (!vtable_arg);
9036 if (!cfg->compile_aot) {
9038 * emit_get_rgctx_method () calls mono_class_vtable () so check
9039 * for type load errors before.
9041 mono_class_setup_vtable (cmethod->klass);
9042 CHECK_TYPELOAD (cmethod->klass);
9045 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9047 /* !marshalbyref is needed to properly handle generic methods + remoting */
9048 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9049 MONO_METHOD_IS_FINAL (cmethod)) &&
9050 !mono_class_is_marshalbyref (cmethod->klass)) {
9057 if (pass_imt_from_rgctx) {
9058 g_assert (!pass_vtable);
9060 imt_arg = emit_get_rgctx_method (cfg, context_used,
9061 cmethod, MONO_RGCTX_INFO_METHOD);
9065 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9067 /* Calling virtual generic methods */
9068 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9069 !(MONO_METHOD_IS_FINAL (cmethod) &&
9070 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9071 fsig->generic_param_count &&
9072 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9073 MonoInst *this_temp, *this_arg_temp, *store;
9074 MonoInst *iargs [4];
9075 gboolean use_imt = FALSE;
9077 g_assert (fsig->is_inflated);
9079 /* Prevent inlining of methods that contain indirect calls */
9080 INLINE_FAILURE ("virtual generic call");
9082 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9083 GSHAREDVT_FAILURE (*ip);
9085 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9086 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9091 g_assert (!imt_arg);
9093 g_assert (cmethod->is_inflated);
9094 imt_arg = emit_get_rgctx_method (cfg, context_used,
9095 cmethod, MONO_RGCTX_INFO_METHOD);
9096 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9098 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9099 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9100 MONO_ADD_INS (bblock, store);
9102 /* FIXME: This should be a managed pointer */
9103 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9105 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9106 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9107 cmethod, MONO_RGCTX_INFO_METHOD);
9108 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9109 addr = mono_emit_jit_icall (cfg,
9110 mono_helper_compile_generic_method, iargs);
9112 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9114 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9121 * Implement a workaround for the inherent races involved in locking:
9127 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9128 * try block, the Exit () won't be executed, see:
9129 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9130 * To work around this, we extend such try blocks to include the last x bytes
9131 * of the Monitor.Enter () call.
9133 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9134 MonoBasicBlock *tbb;
9136 GET_BBLOCK (cfg, tbb, ip + 5);
9138 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9139 * from Monitor.Enter like ArgumentNullException.
9141 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9142 /* Mark this bblock as needing to be extended */
9143 tbb->extend_try_block = TRUE;
9147 /* Conversion to a JIT intrinsic */
9148 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9150 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9151 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9158 if ((cfg->opt & MONO_OPT_INLINE) &&
9159 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9160 mono_method_check_inlining (cfg, cmethod)) {
9162 gboolean always = FALSE;
9164 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9165 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9166 /* Prevent inlining of methods that call wrappers */
9167 INLINE_FAILURE ("wrapper call");
9168 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9172 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9174 cfg->real_offset += 5;
9176 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9177 /* *sp is already set by inline_method */
9182 inline_costs += costs;
9188 /* Tail recursion elimination */
9189 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9190 gboolean has_vtargs = FALSE;
9193 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9194 INLINE_FAILURE ("tail call");
9196 /* keep it simple */
9197 for (i = fsig->param_count - 1; i >= 0; i--) {
9198 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9203 for (i = 0; i < n; ++i)
9204 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9205 MONO_INST_NEW (cfg, ins, OP_BR);
9206 MONO_ADD_INS (bblock, ins);
9207 tblock = start_bblock->out_bb [0];
9208 link_bblock (cfg, bblock, tblock);
9209 ins->inst_target_bb = tblock;
9210 start_new_bblock = 1;
9212 /* skip the CEE_RET, too */
9213 if (ip_in_bb (cfg, bblock, ip + 5))
9220 inline_costs += 10 * num_calls++;
9223 * Making generic calls out of gsharedvt methods.
9224 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9225 * patching gshared method addresses into a gsharedvt method.
9227 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9228 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9229 MonoRgctxInfoType info_type;
9232 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9233 //GSHAREDVT_FAILURE (*ip);
9234 // disable for possible remoting calls
9235 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9236 GSHAREDVT_FAILURE (*ip);
9237 if (fsig->generic_param_count) {
9238 /* virtual generic call */
9239 g_assert (!imt_arg);
9240 /* Same as the virtual generic case above */
9241 imt_arg = emit_get_rgctx_method (cfg, context_used,
9242 cmethod, MONO_RGCTX_INFO_METHOD);
9243 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9245 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9246 /* This can happen when we call a fully instantiated iface method */
9247 imt_arg = emit_get_rgctx_method (cfg, context_used,
9248 cmethod, MONO_RGCTX_INFO_METHOD);
9253 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9254 keep_this_alive = sp [0];
9256 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9257 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9259 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9260 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9262 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9266 /* Generic sharing */
9269 * Use this if the callee is gsharedvt sharable too, since
9270 * at runtime we might find an instantiation so the call cannot
9271 * be patched (the 'no_patch' code path in mini-trampolines.c).
9273 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9274 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9275 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9276 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9277 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9278 INLINE_FAILURE ("gshared");
9280 g_assert (cfg->generic_sharing_context && cmethod);
9284 * We are compiling a call to a
9285 * generic method from shared code,
9286 * which means that we have to look up
9287 * the method in the rgctx and do an
9291 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9293 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9294 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9298 /* Direct calls to icalls */
9300 MonoMethod *wrapper;
9303 /* Inline the wrapper */
9304 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9306 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9307 g_assert (costs > 0);
9308 cfg->real_offset += 5;
9310 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9311 /* *sp is already set by inline_method */
9316 inline_costs += costs;
9325 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9326 MonoInst *val = sp [fsig->param_count];
9328 if (val->type == STACK_OBJ) {
9329 MonoInst *iargs [2];
9334 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9337 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9338 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9339 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9340 emit_write_barrier (cfg, addr, val);
9341 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9342 GSHAREDVT_FAILURE (*ip);
9343 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9344 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9346 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9347 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9348 if (!cmethod->klass->element_class->valuetype && !readonly)
9349 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9350 CHECK_TYPELOAD (cmethod->klass);
9353 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9356 g_assert_not_reached ();
9363 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9367 /* Tail prefix / tail call optimization */
9369 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9370 /* FIXME: runtime generic context pointer for jumps? */
9371 /* FIXME: handle this for generic sharing eventually */
9372 if ((ins_flag & MONO_INST_TAILCALL) &&
9373 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9374 supported_tail_call = TRUE;
9376 if (supported_tail_call) {
9379 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9380 INLINE_FAILURE ("tail call");
9382 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9384 if (ARCH_HAVE_OP_TAIL_CALL) {
9385 /* Handle tail calls similarly to normal calls */
9388 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9390 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9391 call->tail_call = TRUE;
9392 call->method = cmethod;
9393 call->signature = mono_method_signature (cmethod);
9396 * We implement tail calls by storing the actual arguments into the
9397 * argument variables, then emitting a CEE_JMP.
9399 for (i = 0; i < n; ++i) {
9400 /* Prevent argument from being register allocated */
9401 arg_array [i]->flags |= MONO_INST_VOLATILE;
9402 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9404 ins = (MonoInst*)call;
9405 ins->inst_p0 = cmethod;
9406 ins->inst_p1 = arg_array [0];
9407 MONO_ADD_INS (bblock, ins);
9408 link_bblock (cfg, bblock, end_bblock);
9409 start_new_bblock = 1;
9411 // FIXME: Eliminate unreachable epilogs
9414 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9415 * only reachable from this call.
9417 GET_BBLOCK (cfg, tblock, ip + 5);
9418 if (tblock == bblock || tblock->in_count == 0)
9427 * Synchronized wrappers.
9428 * It's hard to determine where to replace a method with its synchronized
9429 * wrapper without causing an infinite recursion. The current solution is
9430 * to add the synchronized wrapper in the trampolines, and to
9431 * change the called method to a dummy wrapper, and resolve that wrapper
9432 * to the real method in mono_jit_compile_method ().
9434 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9435 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9436 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9437 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9441 INLINE_FAILURE ("call");
9442 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9443 imt_arg, vtable_arg);
9446 link_bblock (cfg, bblock, end_bblock);
9447 start_new_bblock = 1;
9449 // FIXME: Eliminate unreachable epilogs
9452 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9453 * only reachable from this call.
9455 GET_BBLOCK (cfg, tblock, ip + 5);
9456 if (tblock == bblock || tblock->in_count == 0)
9463 /* End of call, INS should contain the result of the call, if any */
9465 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9468 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9473 if (keep_this_alive) {
9474 MonoInst *dummy_use;
9476 /* See mono_emit_method_call_full () */
9477 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9480 CHECK_CFG_EXCEPTION;
9484 g_assert (*ip == CEE_RET);
9488 constrained_class = NULL;
9490 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9494 if (cfg->method != method) {
9495 /* return from inlined method */
9497 * If in_count == 0, that means the ret is unreachable due to
9498 * being preceded by a throw. In that case, inline_method () will
9499 * handle setting the return value
9500 * (test case: test_0_inline_throw ()).
9502 if (return_var && cfg->cbb->in_count) {
9503 MonoType *ret_type = mono_method_signature (method)->ret;
9509 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9512 //g_assert (returnvar != -1);
9513 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9514 cfg->ret_var_set = TRUE;
9517 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9519 if (cfg->lmf_var && cfg->cbb->in_count)
9523 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9525 if (seq_points && !sym_seq_points) {
9527 * Place a seq point here too even though the IL stack is not
9528 * empty, so a step over on
9531 * will work correctly.
9533 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9534 MONO_ADD_INS (cfg->cbb, ins);
9537 g_assert (!return_var);
9541 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9544 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9547 if (!cfg->vret_addr) {
9550 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9552 EMIT_NEW_RETLOADA (cfg, ret_addr);
9554 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9555 ins->klass = mono_class_from_mono_type (ret_type);
9558 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9559 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9560 MonoInst *iargs [1];
9564 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9565 mono_arch_emit_setret (cfg, method, conv);
9567 mono_arch_emit_setret (cfg, method, *sp);
9570 mono_arch_emit_setret (cfg, method, *sp);
9575 if (sp != stack_start)
9577 MONO_INST_NEW (cfg, ins, OP_BR);
9579 ins->inst_target_bb = end_bblock;
9580 MONO_ADD_INS (bblock, ins);
9581 link_bblock (cfg, bblock, end_bblock);
9582 start_new_bblock = 1;
9586 MONO_INST_NEW (cfg, ins, OP_BR);
9588 target = ip + 1 + (signed char)(*ip);
9590 GET_BBLOCK (cfg, tblock, target);
9591 link_bblock (cfg, bblock, tblock);
9592 ins->inst_target_bb = tblock;
9593 if (sp != stack_start) {
9594 handle_stack_args (cfg, stack_start, sp - stack_start);
9596 CHECK_UNVERIFIABLE (cfg);
9598 MONO_ADD_INS (bblock, ins);
9599 start_new_bblock = 1;
9600 inline_costs += BRANCH_COST;
9614 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9616 target = ip + 1 + *(signed char*)ip;
9622 inline_costs += BRANCH_COST;
9626 MONO_INST_NEW (cfg, ins, OP_BR);
9629 target = ip + 4 + (gint32)read32(ip);
9631 GET_BBLOCK (cfg, tblock, target);
9632 link_bblock (cfg, bblock, tblock);
9633 ins->inst_target_bb = tblock;
9634 if (sp != stack_start) {
9635 handle_stack_args (cfg, stack_start, sp - stack_start);
9637 CHECK_UNVERIFIABLE (cfg);
9640 MONO_ADD_INS (bblock, ins);
9642 start_new_bblock = 1;
9643 inline_costs += BRANCH_COST;
9650 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9651 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9652 guint32 opsize = is_short ? 1 : 4;
9654 CHECK_OPSIZE (opsize);
9656 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9659 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9664 GET_BBLOCK (cfg, tblock, target);
9665 link_bblock (cfg, bblock, tblock);
9666 GET_BBLOCK (cfg, tblock, ip);
9667 link_bblock (cfg, bblock, tblock);
9669 if (sp != stack_start) {
9670 handle_stack_args (cfg, stack_start, sp - stack_start);
9671 CHECK_UNVERIFIABLE (cfg);
9674 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9675 cmp->sreg1 = sp [0]->dreg;
9676 type_from_op (cfg, cmp, sp [0], NULL);
9679 #if SIZEOF_REGISTER == 4
9680 if (cmp->opcode == OP_LCOMPARE_IMM) {
9681 /* Convert it to OP_LCOMPARE */
9682 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9683 ins->type = STACK_I8;
9684 ins->dreg = alloc_dreg (cfg, STACK_I8);
9686 MONO_ADD_INS (bblock, ins);
9687 cmp->opcode = OP_LCOMPARE;
9688 cmp->sreg2 = ins->dreg;
9691 MONO_ADD_INS (bblock, cmp);
9693 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9694 type_from_op (cfg, ins, sp [0], NULL);
9695 MONO_ADD_INS (bblock, ins);
9696 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9697 GET_BBLOCK (cfg, tblock, target);
9698 ins->inst_true_bb = tblock;
9699 GET_BBLOCK (cfg, tblock, ip);
9700 ins->inst_false_bb = tblock;
9701 start_new_bblock = 2;
9704 inline_costs += BRANCH_COST;
9719 MONO_INST_NEW (cfg, ins, *ip);
9721 target = ip + 4 + (gint32)read32(ip);
9727 inline_costs += BRANCH_COST;
9731 MonoBasicBlock **targets;
9732 MonoBasicBlock *default_bblock;
9733 MonoJumpInfoBBTable *table;
9734 int offset_reg = alloc_preg (cfg);
9735 int target_reg = alloc_preg (cfg);
9736 int table_reg = alloc_preg (cfg);
9737 int sum_reg = alloc_preg (cfg);
9738 gboolean use_op_switch;
9742 n = read32 (ip + 1);
9745 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9749 CHECK_OPSIZE (n * sizeof (guint32));
9750 target = ip + n * sizeof (guint32);
9752 GET_BBLOCK (cfg, default_bblock, target);
9753 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9755 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9756 for (i = 0; i < n; ++i) {
9757 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9758 targets [i] = tblock;
9759 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9763 if (sp != stack_start) {
9765 * Link the current bb with the targets as well, so handle_stack_args
9766 * will set their in_stack correctly.
9768 link_bblock (cfg, bblock, default_bblock);
9769 for (i = 0; i < n; ++i)
9770 link_bblock (cfg, bblock, targets [i]);
9772 handle_stack_args (cfg, stack_start, sp - stack_start);
9774 CHECK_UNVERIFIABLE (cfg);
9777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9781 for (i = 0; i < n; ++i)
9782 link_bblock (cfg, bblock, targets [i]);
9784 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9785 table->table = targets;
9786 table->table_size = n;
9788 use_op_switch = FALSE;
9790 /* ARM implements SWITCH statements differently */
9791 /* FIXME: Make it use the generic implementation */
9792 if (!cfg->compile_aot)
9793 use_op_switch = TRUE;
9796 if (COMPILE_LLVM (cfg))
9797 use_op_switch = TRUE;
9799 cfg->cbb->has_jump_table = 1;
9801 if (use_op_switch) {
9802 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9803 ins->sreg1 = src1->dreg;
9804 ins->inst_p0 = table;
9805 ins->inst_many_bb = targets;
9806 ins->klass = GUINT_TO_POINTER (n);
9807 MONO_ADD_INS (cfg->cbb, ins);
9809 if (sizeof (gpointer) == 8)
9810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9814 #if SIZEOF_REGISTER == 8
9815 /* The upper word might not be zero, and we add it to a 64 bit address later */
9816 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9819 if (cfg->compile_aot) {
9820 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9822 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9823 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9824 ins->inst_p0 = table;
9825 ins->dreg = table_reg;
9826 MONO_ADD_INS (cfg->cbb, ins);
9829 /* FIXME: Use load_memindex */
9830 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9832 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9834 start_new_bblock = 1;
9835 inline_costs += (BRANCH_COST * 2);
9855 dreg = alloc_freg (cfg);
9858 dreg = alloc_lreg (cfg);
9861 dreg = alloc_ireg_ref (cfg);
9864 dreg = alloc_preg (cfg);
9867 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9868 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9869 if (*ip == CEE_LDIND_R4)
9870 ins->type = cfg->r4_stack_type;
9871 ins->flags |= ins_flag;
9872 MONO_ADD_INS (bblock, ins);
9874 if (ins_flag & MONO_INST_VOLATILE) {
9875 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9876 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9892 if (ins_flag & MONO_INST_VOLATILE) {
9893 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9894 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9897 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9898 ins->flags |= ins_flag;
9901 MONO_ADD_INS (bblock, ins);
9903 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9904 emit_write_barrier (cfg, sp [0], sp [1]);
9913 MONO_INST_NEW (cfg, ins, (*ip));
9915 ins->sreg1 = sp [0]->dreg;
9916 ins->sreg2 = sp [1]->dreg;
9917 type_from_op (cfg, ins, sp [0], sp [1]);
9919 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9921 /* Use the immediate opcodes if possible */
9922 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9923 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9924 if (imm_opcode != -1) {
9925 ins->opcode = imm_opcode;
9926 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9929 NULLIFY_INS (sp [1]);
9933 MONO_ADD_INS ((cfg)->cbb, (ins));
9935 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9952 MONO_INST_NEW (cfg, ins, (*ip));
9954 ins->sreg1 = sp [0]->dreg;
9955 ins->sreg2 = sp [1]->dreg;
9956 type_from_op (cfg, ins, sp [0], sp [1]);
9958 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9959 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9961 /* FIXME: Pass opcode to is_inst_imm */
9963 /* Use the immediate opcodes if possible */
9964 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9967 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9968 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9969 /* Keep emulated opcodes which are optimized away later */
9970 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9971 imm_opcode = mono_op_to_op_imm (ins->opcode);
9974 if (imm_opcode != -1) {
9975 ins->opcode = imm_opcode;
9976 if (sp [1]->opcode == OP_I8CONST) {
9977 #if SIZEOF_REGISTER == 8
9978 ins->inst_imm = sp [1]->inst_l;
9980 ins->inst_ls_word = sp [1]->inst_ls_word;
9981 ins->inst_ms_word = sp [1]->inst_ms_word;
9985 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9988 /* Might be followed by an instruction added by add_widen_op */
9989 if (sp [1]->next == NULL)
9990 NULLIFY_INS (sp [1]);
9993 MONO_ADD_INS ((cfg)->cbb, (ins));
9995 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10008 case CEE_CONV_OVF_I8:
10009 case CEE_CONV_OVF_U8:
10010 case CEE_CONV_R_UN:
10013 /* Special case this earlier so we have long constants in the IR */
10014 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10015 int data = sp [-1]->inst_c0;
10016 sp [-1]->opcode = OP_I8CONST;
10017 sp [-1]->type = STACK_I8;
10018 #if SIZEOF_REGISTER == 8
10019 if ((*ip) == CEE_CONV_U8)
10020 sp [-1]->inst_c0 = (guint32)data;
10022 sp [-1]->inst_c0 = data;
10024 sp [-1]->inst_ls_word = data;
10025 if ((*ip) == CEE_CONV_U8)
10026 sp [-1]->inst_ms_word = 0;
10028 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10030 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10037 case CEE_CONV_OVF_I4:
10038 case CEE_CONV_OVF_I1:
10039 case CEE_CONV_OVF_I2:
10040 case CEE_CONV_OVF_I:
10041 case CEE_CONV_OVF_U:
10044 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10045 ADD_UNOP (CEE_CONV_OVF_I8);
10052 case CEE_CONV_OVF_U1:
10053 case CEE_CONV_OVF_U2:
10054 case CEE_CONV_OVF_U4:
10057 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10058 ADD_UNOP (CEE_CONV_OVF_U8);
10065 case CEE_CONV_OVF_I1_UN:
10066 case CEE_CONV_OVF_I2_UN:
10067 case CEE_CONV_OVF_I4_UN:
10068 case CEE_CONV_OVF_I8_UN:
10069 case CEE_CONV_OVF_U1_UN:
10070 case CEE_CONV_OVF_U2_UN:
10071 case CEE_CONV_OVF_U4_UN:
10072 case CEE_CONV_OVF_U8_UN:
10073 case CEE_CONV_OVF_I_UN:
10074 case CEE_CONV_OVF_U_UN:
10081 CHECK_CFG_EXCEPTION;
10085 case CEE_ADD_OVF_UN:
10087 case CEE_MUL_OVF_UN:
10089 case CEE_SUB_OVF_UN:
10095 GSHAREDVT_FAILURE (*ip);
10098 token = read32 (ip + 1);
10099 klass = mini_get_class (method, token, generic_context);
10100 CHECK_TYPELOAD (klass);
10102 if (generic_class_is_reference_type (cfg, klass)) {
10103 MonoInst *store, *load;
10104 int dreg = alloc_ireg_ref (cfg);
10106 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10107 load->flags |= ins_flag;
10108 MONO_ADD_INS (cfg->cbb, load);
10110 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10111 store->flags |= ins_flag;
10112 MONO_ADD_INS (cfg->cbb, store);
10114 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10115 emit_write_barrier (cfg, sp [0], sp [1]);
10117 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10123 int loc_index = -1;
10129 token = read32 (ip + 1);
10130 klass = mini_get_class (method, token, generic_context);
10131 CHECK_TYPELOAD (klass);
10133 /* Optimize the common ldobj+stloc combination */
10136 loc_index = ip [6];
10143 loc_index = ip [5] - CEE_STLOC_0;
10150 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10151 CHECK_LOCAL (loc_index);
10153 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10154 ins->dreg = cfg->locals [loc_index]->dreg;
10155 ins->flags |= ins_flag;
10158 if (ins_flag & MONO_INST_VOLATILE) {
10159 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10160 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10166 /* Optimize the ldobj+stobj combination */
10167 /* The reference case ends up being a load+store anyway */
10168 /* Skip this if the operation is volatile. */
10169 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10174 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10181 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10182 ins->flags |= ins_flag;
10185 if (ins_flag & MONO_INST_VOLATILE) {
10186 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10187 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10196 CHECK_STACK_OVF (1);
10198 n = read32 (ip + 1);
10200 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10201 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10202 ins->type = STACK_OBJ;
10205 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10206 MonoInst *iargs [1];
10207 char *str = mono_method_get_wrapper_data (method, n);
10209 if (cfg->compile_aot)
10210 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10212 EMIT_NEW_PCONST (cfg, iargs [0], str);
10213 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10215 if (cfg->opt & MONO_OPT_SHARED) {
10216 MonoInst *iargs [3];
10218 if (cfg->compile_aot) {
10219 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10221 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10222 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10223 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10224 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10225 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10227 if (bblock->out_of_line) {
10228 MonoInst *iargs [2];
10230 if (image == mono_defaults.corlib) {
10232 * Avoid relocations in AOT and save some space by using a
10233 * version of helper_ldstr specialized to mscorlib.
10235 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10236 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10238 /* Avoid creating the string object */
10239 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10240 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10241 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10245 if (cfg->compile_aot) {
10246 NEW_LDSTRCONST (cfg, ins, image, n);
10248 MONO_ADD_INS (bblock, ins);
10251 NEW_PCONST (cfg, ins, NULL);
10252 ins->type = STACK_OBJ;
10253 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10255 OUT_OF_MEMORY_FAILURE;
10258 MONO_ADD_INS (bblock, ins);
10267 MonoInst *iargs [2];
10268 MonoMethodSignature *fsig;
10271 MonoInst *vtable_arg = NULL;
10274 token = read32 (ip + 1);
10275 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10276 if (!cmethod || mono_loader_get_last_error ())
10278 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10281 mono_save_token_info (cfg, image, token, cmethod);
10283 if (!mono_class_init (cmethod->klass))
10284 TYPE_LOAD_ERROR (cmethod->klass);
10286 context_used = mini_method_check_context_used (cfg, cmethod);
10288 if (mono_security_core_clr_enabled ())
10289 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10291 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10292 emit_generic_class_init (cfg, cmethod->klass);
10293 CHECK_TYPELOAD (cmethod->klass);
10297 if (cfg->gsharedvt) {
10298 if (mini_is_gsharedvt_variable_signature (sig))
10299 GSHAREDVT_FAILURE (*ip);
10303 n = fsig->param_count;
10307 * Generate smaller code for the common newobj <exception> instruction in
10308 * argument checking code.
10310 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10311 is_exception_class (cmethod->klass) && n <= 2 &&
10312 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10313 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10314 MonoInst *iargs [3];
10318 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10321 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10324 iargs [1] = sp [0];
10325 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10328 iargs [1] = sp [0];
10329 iargs [2] = sp [1];
10330 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10333 g_assert_not_reached ();
10341 /* move the args to allow room for 'this' in the first position */
10347 /* check_call_signature () requires sp[0] to be set */
10348 this_ins.type = STACK_OBJ;
10349 sp [0] = &this_ins;
10350 if (check_call_signature (cfg, fsig, sp))
10355 if (mini_class_is_system_array (cmethod->klass)) {
10356 *sp = emit_get_rgctx_method (cfg, context_used,
10357 cmethod, MONO_RGCTX_INFO_METHOD);
10359 /* Avoid varargs in the common case */
10360 if (fsig->param_count == 1)
10361 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10362 else if (fsig->param_count == 2)
10363 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10364 else if (fsig->param_count == 3)
10365 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10366 else if (fsig->param_count == 4)
10367 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10369 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10370 } else if (cmethod->string_ctor) {
10371 g_assert (!context_used);
10372 g_assert (!vtable_arg);
10373 /* we simply pass a null pointer */
10374 EMIT_NEW_PCONST (cfg, *sp, NULL);
10375 /* now call the string ctor */
10376 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10378 if (cmethod->klass->valuetype) {
10379 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10380 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10381 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10386 * The code generated by mini_emit_virtual_call () expects
10387 * iargs [0] to be a boxed instance, but luckily the vcall
10388 * will be transformed into a normal call there.
10390 } else if (context_used) {
10391 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10394 MonoVTable *vtable = NULL;
10396 if (!cfg->compile_aot)
10397 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10398 CHECK_TYPELOAD (cmethod->klass);
10401 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10402 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10403 * As a workaround, we call class cctors before allocating objects.
10405 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10406 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10407 if (cfg->verbose_level > 2)
10408 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10409 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10412 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10415 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10418 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10420 /* Now call the actual ctor */
10421 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10422 CHECK_CFG_EXCEPTION;
10425 if (alloc == NULL) {
10427 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10428 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10436 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10437 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10440 case CEE_CASTCLASS:
10444 token = read32 (ip + 1);
10445 klass = mini_get_class (method, token, generic_context);
10446 CHECK_TYPELOAD (klass);
10447 if (sp [0]->type != STACK_OBJ)
10450 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10451 CHECK_CFG_EXCEPTION;
10460 token = read32 (ip + 1);
10461 klass = mini_get_class (method, token, generic_context);
10462 CHECK_TYPELOAD (klass);
10463 if (sp [0]->type != STACK_OBJ)
10466 context_used = mini_class_check_context_used (cfg, klass);
10468 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10469 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10470 MonoInst *args [3];
10477 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10480 if (cfg->compile_aot) {
10481 idx = get_castclass_cache_idx (cfg);
10482 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10484 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10487 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10490 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10491 MonoMethod *mono_isinst;
10492 MonoInst *iargs [1];
10495 mono_isinst = mono_marshal_get_isinst (klass);
10496 iargs [0] = sp [0];
10498 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10499 iargs, ip, cfg->real_offset, TRUE, &bblock);
10500 CHECK_CFG_EXCEPTION;
10501 g_assert (costs > 0);
10504 cfg->real_offset += 5;
10508 inline_costs += costs;
10511 ins = handle_isinst (cfg, klass, *sp, context_used);
10512 CHECK_CFG_EXCEPTION;
10519 case CEE_UNBOX_ANY: {
10520 MonoInst *res, *addr;
10525 token = read32 (ip + 1);
10526 klass = mini_get_class (method, token, generic_context);
10527 CHECK_TYPELOAD (klass);
10529 mono_save_token_info (cfg, image, token, klass);
10531 context_used = mini_class_check_context_used (cfg, klass);
10533 if (mini_is_gsharedvt_klass (cfg, klass)) {
10534 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10536 } else if (generic_class_is_reference_type (cfg, klass)) {
10537 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10538 CHECK_CFG_EXCEPTION;
10539 } else if (mono_class_is_nullable (klass)) {
10540 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10542 addr = handle_unbox (cfg, klass, sp, context_used);
10544 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10555 MonoClass *enum_class;
10556 MonoMethod *has_flag;
10562 token = read32 (ip + 1);
10563 klass = mini_get_class (method, token, generic_context);
10564 CHECK_TYPELOAD (klass);
10566 mono_save_token_info (cfg, image, token, klass);
10568 context_used = mini_class_check_context_used (cfg, klass);
10570 if (generic_class_is_reference_type (cfg, klass)) {
10576 if (klass == mono_defaults.void_class)
10578 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10580 /* frequent check in generic code: box (struct), brtrue */
10585 * <push int/long ptr>
10588 * constrained. MyFlags
10589 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10591 * If we find this sequence and the operand types on box and constrained
10592 * are equal, we can emit a specialized instruction sequence instead of
10593 * the very slow HasFlag () call.
10595 if ((cfg->opt & MONO_OPT_INTRINS) &&
10596 /* Cheap checks first. */
10597 ip + 5 + 6 + 5 < end &&
10598 ip [5] == CEE_PREFIX1 &&
10599 ip [6] == CEE_CONSTRAINED_ &&
10600 ip [11] == CEE_CALLVIRT &&
10601 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10602 mono_class_is_enum (klass) &&
10603 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10604 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10605 has_flag->klass == mono_defaults.enum_class &&
10606 !strcmp (has_flag->name, "HasFlag") &&
10607 has_flag->signature->hasthis &&
10608 has_flag->signature->param_count == 1) {
10609 CHECK_TYPELOAD (enum_class);
10611 if (enum_class == klass) {
10612 MonoInst *enum_this, *enum_flag;
10617 enum_this = sp [0];
10618 enum_flag = sp [1];
10620 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10625 // FIXME: LLVM can't handle the inconsistent bb linking
10626 if (!mono_class_is_nullable (klass) &&
10627 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10628 (ip [5] == CEE_BRTRUE ||
10629 ip [5] == CEE_BRTRUE_S ||
10630 ip [5] == CEE_BRFALSE ||
10631 ip [5] == CEE_BRFALSE_S)) {
10632 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10634 MonoBasicBlock *true_bb, *false_bb;
10638 if (cfg->verbose_level > 3) {
10639 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10640 printf ("<box+brtrue opt>\n");
10645 case CEE_BRFALSE_S:
10648 target = ip + 1 + (signed char)(*ip);
10655 target = ip + 4 + (gint)(read32 (ip));
10659 g_assert_not_reached ();
10663 * We need to link both bblocks, since it is needed for handling stack
10664 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10665 * Branching to only one of them would lead to inconsistencies, so
10666 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10668 GET_BBLOCK (cfg, true_bb, target);
10669 GET_BBLOCK (cfg, false_bb, ip);
10671 mono_link_bblock (cfg, cfg->cbb, true_bb);
10672 mono_link_bblock (cfg, cfg->cbb, false_bb);
10674 if (sp != stack_start) {
10675 handle_stack_args (cfg, stack_start, sp - stack_start);
10677 CHECK_UNVERIFIABLE (cfg);
10680 if (COMPILE_LLVM (cfg)) {
10681 dreg = alloc_ireg (cfg);
10682 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10685 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10687 /* The JIT can't eliminate the iconst+compare */
10688 MONO_INST_NEW (cfg, ins, OP_BR);
10689 ins->inst_target_bb = is_true ? true_bb : false_bb;
10690 MONO_ADD_INS (cfg->cbb, ins);
10693 start_new_bblock = 1;
10697 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10699 CHECK_CFG_EXCEPTION;
10708 token = read32 (ip + 1);
10709 klass = mini_get_class (method, token, generic_context);
10710 CHECK_TYPELOAD (klass);
10712 mono_save_token_info (cfg, image, token, klass);
10714 context_used = mini_class_check_context_used (cfg, klass);
10716 if (mono_class_is_nullable (klass)) {
10719 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10720 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10724 ins = handle_unbox (cfg, klass, sp, context_used);
10737 MonoClassField *field;
10738 #ifndef DISABLE_REMOTING
10742 gboolean is_instance;
10744 gpointer addr = NULL;
10745 gboolean is_special_static;
10747 MonoInst *store_val = NULL;
10748 MonoInst *thread_ins;
10751 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10753 if (op == CEE_STFLD) {
10756 store_val = sp [1];
10761 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10763 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10766 if (op == CEE_STSFLD) {
10769 store_val = sp [0];
10774 token = read32 (ip + 1);
10775 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10776 field = mono_method_get_wrapper_data (method, token);
10777 klass = field->parent;
10780 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10783 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10784 FIELD_ACCESS_FAILURE (method, field);
10785 mono_class_init (klass);
10787 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10790 /* if the class is Critical then transparent code cannot access its fields */
10791 if (!is_instance && mono_security_core_clr_enabled ())
10792 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10794 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10795 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10796 if (mono_security_core_clr_enabled ())
10797 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10801 * LDFLD etc. is usable on static fields as well, so convert those cases to
10804 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10816 g_assert_not_reached ();
10818 is_instance = FALSE;
10821 context_used = mini_class_check_context_used (cfg, klass);
10823 /* INSTANCE CASE */
10825 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10826 if (op == CEE_STFLD) {
10827 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10829 #ifndef DISABLE_REMOTING
10830 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10831 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10832 MonoInst *iargs [5];
10834 GSHAREDVT_FAILURE (op);
10836 iargs [0] = sp [0];
10837 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10838 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10839 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10841 iargs [4] = sp [1];
10843 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10844 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10845 iargs, ip, cfg->real_offset, TRUE, &bblock);
10846 CHECK_CFG_EXCEPTION;
10847 g_assert (costs > 0);
10849 cfg->real_offset += 5;
10851 inline_costs += costs;
10853 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10860 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10862 if (mini_is_gsharedvt_klass (cfg, klass)) {
10863 MonoInst *offset_ins;
10865 context_used = mini_class_check_context_used (cfg, klass);
10867 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10868 dreg = alloc_ireg_mp (cfg);
10869 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10870 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10871 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10873 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10875 if (sp [0]->opcode != OP_LDADDR)
10876 store->flags |= MONO_INST_FAULT;
10878 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10879 /* insert call to write barrier */
10883 dreg = alloc_ireg_mp (cfg);
10884 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10885 emit_write_barrier (cfg, ptr, sp [1]);
10888 store->flags |= ins_flag;
10895 #ifndef DISABLE_REMOTING
10896 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10897 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10898 MonoInst *iargs [4];
10900 GSHAREDVT_FAILURE (op);
10902 iargs [0] = sp [0];
10903 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10904 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10905 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10906 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10907 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10908 iargs, ip, cfg->real_offset, TRUE, &bblock);
10909 CHECK_CFG_EXCEPTION;
10910 g_assert (costs > 0);
10912 cfg->real_offset += 5;
10916 inline_costs += costs;
10918 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10924 if (sp [0]->type == STACK_VTYPE) {
10927 /* Have to compute the address of the variable */
10929 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10931 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10933 g_assert (var->klass == klass);
10935 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10939 if (op == CEE_LDFLDA) {
10940 if (is_magic_tls_access (field)) {
10941 GSHAREDVT_FAILURE (*ip);
10943 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10945 if (sp [0]->type == STACK_OBJ) {
10946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10947 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10950 dreg = alloc_ireg_mp (cfg);
10952 if (mini_is_gsharedvt_klass (cfg, klass)) {
10953 MonoInst *offset_ins;
10955 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10956 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10958 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10960 ins->klass = mono_class_from_mono_type (field->type);
10961 ins->type = STACK_MP;
10967 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10969 if (mini_is_gsharedvt_klass (cfg, klass)) {
10970 MonoInst *offset_ins;
10972 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10973 dreg = alloc_ireg_mp (cfg);
10974 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10975 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10977 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10979 load->flags |= ins_flag;
10980 if (sp [0]->opcode != OP_LDADDR)
10981 load->flags |= MONO_INST_FAULT;
10993 context_used = mini_class_check_context_used (cfg, klass);
10995 ftype = mono_field_get_type (field);
10997 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11000 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11001 * to be called here.
11003 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11004 mono_class_vtable (cfg->domain, klass);
11005 CHECK_TYPELOAD (klass);
11007 mono_domain_lock (cfg->domain);
11008 if (cfg->domain->special_static_fields)
11009 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11010 mono_domain_unlock (cfg->domain);
11012 is_special_static = mono_class_field_is_special_static (field);
11014 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11015 thread_ins = mono_get_thread_intrinsic (cfg);
11019 /* Generate IR to compute the field address */
11020 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11022 * Fast access to TLS data
11023 * Inline version of get_thread_static_data () in
11027 int idx, static_data_reg, array_reg, dreg;
11029 GSHAREDVT_FAILURE (op);
11031 MONO_ADD_INS (cfg->cbb, thread_ins);
11032 static_data_reg = alloc_ireg (cfg);
11033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11035 if (cfg->compile_aot) {
11036 int offset_reg, offset2_reg, idx_reg;
11038 /* For TLS variables, this will return the TLS offset */
11039 EMIT_NEW_SFLDACONST (cfg, ins, field);
11040 offset_reg = ins->dreg;
11041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11042 idx_reg = alloc_ireg (cfg);
11043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11045 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11046 array_reg = alloc_ireg (cfg);
11047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11048 offset2_reg = alloc_ireg (cfg);
11049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11051 dreg = alloc_ireg (cfg);
11052 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11054 offset = (gsize)addr & 0x7fffffff;
11055 idx = offset & 0x3f;
11057 array_reg = alloc_ireg (cfg);
11058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11059 dreg = alloc_ireg (cfg);
11060 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11062 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11063 (cfg->compile_aot && is_special_static) ||
11064 (context_used && is_special_static)) {
11065 MonoInst *iargs [2];
11067 g_assert (field->parent);
11068 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11069 if (context_used) {
11070 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11071 field, MONO_RGCTX_INFO_CLASS_FIELD);
11073 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11075 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11076 } else if (context_used) {
11077 MonoInst *static_data;
11080 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11081 method->klass->name_space, method->klass->name, method->name,
11082 depth, field->offset);
11085 if (mono_class_needs_cctor_run (klass, method))
11086 emit_generic_class_init (cfg, klass);
11089 * The pointer we're computing here is
11091 * super_info.static_data + field->offset
11093 static_data = emit_get_rgctx_klass (cfg, context_used,
11094 klass, MONO_RGCTX_INFO_STATIC_DATA);
11096 if (mini_is_gsharedvt_klass (cfg, klass)) {
11097 MonoInst *offset_ins;
11099 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11100 dreg = alloc_ireg_mp (cfg);
11101 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11102 } else if (field->offset == 0) {
11105 int addr_reg = mono_alloc_preg (cfg);
11106 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11108 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11109 MonoInst *iargs [2];
11111 g_assert (field->parent);
11112 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11113 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11114 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11116 MonoVTable *vtable = NULL;
11118 if (!cfg->compile_aot)
11119 vtable = mono_class_vtable (cfg->domain, klass);
11120 CHECK_TYPELOAD (klass);
11123 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11124 if (!(g_slist_find (class_inits, klass))) {
11125 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11126 if (cfg->verbose_level > 2)
11127 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11128 class_inits = g_slist_prepend (class_inits, klass);
11131 if (cfg->run_cctors) {
11133 /* This makes so that inline cannot trigger */
11134 /* .cctors: too many apps depend on them */
11135 /* running with a specific order... */
11137 if (! vtable->initialized)
11138 INLINE_FAILURE ("class init");
11139 ex = mono_runtime_class_init_full (vtable, FALSE);
11141 set_exception_object (cfg, ex);
11142 goto exception_exit;
11146 if (cfg->compile_aot)
11147 EMIT_NEW_SFLDACONST (cfg, ins, field);
11150 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11152 EMIT_NEW_PCONST (cfg, ins, addr);
11155 MonoInst *iargs [1];
11156 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11157 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11161 /* Generate IR to do the actual load/store operation */
11163 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11164 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11165 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11168 if (op == CEE_LDSFLDA) {
11169 ins->klass = mono_class_from_mono_type (ftype);
11170 ins->type = STACK_PTR;
11172 } else if (op == CEE_STSFLD) {
11175 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11176 store->flags |= ins_flag;
11178 gboolean is_const = FALSE;
11179 MonoVTable *vtable = NULL;
11180 gpointer addr = NULL;
11182 if (!context_used) {
11183 vtable = mono_class_vtable (cfg->domain, klass);
11184 CHECK_TYPELOAD (klass);
11186 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11187 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11188 int ro_type = ftype->type;
11190 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11191 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11192 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11195 GSHAREDVT_FAILURE (op);
11197 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11200 case MONO_TYPE_BOOLEAN:
11202 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11206 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11209 case MONO_TYPE_CHAR:
11211 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11215 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11220 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11224 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11229 case MONO_TYPE_PTR:
11230 case MONO_TYPE_FNPTR:
11231 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11232 type_to_eval_stack_type ((cfg), field->type, *sp);
11235 case MONO_TYPE_STRING:
11236 case MONO_TYPE_OBJECT:
11237 case MONO_TYPE_CLASS:
11238 case MONO_TYPE_SZARRAY:
11239 case MONO_TYPE_ARRAY:
11240 if (!mono_gc_is_moving ()) {
11241 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11242 type_to_eval_stack_type ((cfg), field->type, *sp);
11250 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11255 case MONO_TYPE_VALUETYPE:
11265 CHECK_STACK_OVF (1);
11267 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11268 load->flags |= ins_flag;
11274 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11275 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11276 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11287 token = read32 (ip + 1);
11288 klass = mini_get_class (method, token, generic_context);
11289 CHECK_TYPELOAD (klass);
11290 if (ins_flag & MONO_INST_VOLATILE) {
11291 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11292 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11294 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11295 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11296 ins->flags |= ins_flag;
11297 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11298 generic_class_is_reference_type (cfg, klass)) {
11299 /* insert call to write barrier */
11300 emit_write_barrier (cfg, sp [0], sp [1]);
11312 const char *data_ptr;
11314 guint32 field_token;
11320 token = read32 (ip + 1);
11322 klass = mini_get_class (method, token, generic_context);
11323 CHECK_TYPELOAD (klass);
11325 context_used = mini_class_check_context_used (cfg, klass);
11327 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11328 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11329 ins->sreg1 = sp [0]->dreg;
11330 ins->type = STACK_I4;
11331 ins->dreg = alloc_ireg (cfg);
11332 MONO_ADD_INS (cfg->cbb, ins);
11333 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11336 if (context_used) {
11337 MonoInst *args [3];
11338 MonoClass *array_class = mono_array_class_get (klass, 1);
11339 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11341 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11344 args [0] = emit_get_rgctx_klass (cfg, context_used,
11345 array_class, MONO_RGCTX_INFO_VTABLE);
11350 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11352 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11354 if (cfg->opt & MONO_OPT_SHARED) {
11355 /* Decompose now to avoid problems with references to the domainvar */
11356 MonoInst *iargs [3];
11358 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11359 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11360 iargs [2] = sp [0];
11362 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11364 /* Decompose later since it is needed by abcrem */
11365 MonoClass *array_type = mono_array_class_get (klass, 1);
11366 mono_class_vtable (cfg->domain, array_type);
11367 CHECK_TYPELOAD (array_type);
11369 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11370 ins->dreg = alloc_ireg_ref (cfg);
11371 ins->sreg1 = sp [0]->dreg;
11372 ins->inst_newa_class = klass;
11373 ins->type = STACK_OBJ;
11374 ins->klass = array_type;
11375 MONO_ADD_INS (cfg->cbb, ins);
11376 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11377 cfg->cbb->has_array_access = TRUE;
11379 /* Needed so mono_emit_load_get_addr () gets called */
11380 mono_get_got_var (cfg);
11390 * we inline/optimize the initialization sequence if possible.
11391 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11392 * for small sizes open code the memcpy
11393 * ensure the rva field is big enough
11395 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11396 MonoMethod *memcpy_method = get_memcpy_method ();
11397 MonoInst *iargs [3];
11398 int add_reg = alloc_ireg_mp (cfg);
11400 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11401 if (cfg->compile_aot) {
11402 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11404 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11406 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11407 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11416 if (sp [0]->type != STACK_OBJ)
11419 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11420 ins->dreg = alloc_preg (cfg);
11421 ins->sreg1 = sp [0]->dreg;
11422 ins->type = STACK_I4;
11423 /* This flag will be inherited by the decomposition */
11424 ins->flags |= MONO_INST_FAULT;
11425 MONO_ADD_INS (cfg->cbb, ins);
11426 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11427 cfg->cbb->has_array_access = TRUE;
11435 if (sp [0]->type != STACK_OBJ)
11438 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11440 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11441 CHECK_TYPELOAD (klass);
11442 /* we need to make sure that this array is exactly the type it needs
11443 * to be for correctness. the wrappers are lax with their usage
11444 * so we need to ignore them here
11446 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11447 MonoClass *array_class = mono_array_class_get (klass, 1);
11448 mini_emit_check_array_type (cfg, sp [0], array_class);
11449 CHECK_TYPELOAD (array_class);
11453 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11458 case CEE_LDELEM_I1:
11459 case CEE_LDELEM_U1:
11460 case CEE_LDELEM_I2:
11461 case CEE_LDELEM_U2:
11462 case CEE_LDELEM_I4:
11463 case CEE_LDELEM_U4:
11464 case CEE_LDELEM_I8:
11466 case CEE_LDELEM_R4:
11467 case CEE_LDELEM_R8:
11468 case CEE_LDELEM_REF: {
11474 if (*ip == CEE_LDELEM) {
11476 token = read32 (ip + 1);
11477 klass = mini_get_class (method, token, generic_context);
11478 CHECK_TYPELOAD (klass);
11479 mono_class_init (klass);
11482 klass = array_access_to_klass (*ip);
11484 if (sp [0]->type != STACK_OBJ)
11487 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11489 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11490 // FIXME-VT: OP_ICONST optimization
11491 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11492 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11493 ins->opcode = OP_LOADV_MEMBASE;
11494 } else if (sp [1]->opcode == OP_ICONST) {
11495 int array_reg = sp [0]->dreg;
11496 int index_reg = sp [1]->dreg;
11497 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11499 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11500 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11502 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11503 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11506 if (*ip == CEE_LDELEM)
11513 case CEE_STELEM_I1:
11514 case CEE_STELEM_I2:
11515 case CEE_STELEM_I4:
11516 case CEE_STELEM_I8:
11517 case CEE_STELEM_R4:
11518 case CEE_STELEM_R8:
11519 case CEE_STELEM_REF:
11524 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11526 if (*ip == CEE_STELEM) {
11528 token = read32 (ip + 1);
11529 klass = mini_get_class (method, token, generic_context);
11530 CHECK_TYPELOAD (klass);
11531 mono_class_init (klass);
11534 klass = array_access_to_klass (*ip);
11536 if (sp [0]->type != STACK_OBJ)
11539 emit_array_store (cfg, klass, sp, TRUE);
11541 if (*ip == CEE_STELEM)
11548 case CEE_CKFINITE: {
11552 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11553 ins->sreg1 = sp [0]->dreg;
11554 ins->dreg = alloc_freg (cfg);
11555 ins->type = STACK_R8;
11556 MONO_ADD_INS (bblock, ins);
11558 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11563 case CEE_REFANYVAL: {
11564 MonoInst *src_var, *src;
11566 int klass_reg = alloc_preg (cfg);
11567 int dreg = alloc_preg (cfg);
11569 GSHAREDVT_FAILURE (*ip);
11572 MONO_INST_NEW (cfg, ins, *ip);
11575 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11576 CHECK_TYPELOAD (klass);
11578 context_used = mini_class_check_context_used (cfg, klass);
11581 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11583 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11584 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11587 if (context_used) {
11588 MonoInst *klass_ins;
11590 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11591 klass, MONO_RGCTX_INFO_KLASS);
11594 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11595 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11597 mini_emit_class_check (cfg, klass_reg, klass);
11599 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11600 ins->type = STACK_MP;
11605 case CEE_MKREFANY: {
11606 MonoInst *loc, *addr;
11608 GSHAREDVT_FAILURE (*ip);
11611 MONO_INST_NEW (cfg, ins, *ip);
11614 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11615 CHECK_TYPELOAD (klass);
11617 context_used = mini_class_check_context_used (cfg, klass);
11619 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11620 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11622 if (context_used) {
11623 MonoInst *const_ins;
11624 int type_reg = alloc_preg (cfg);
11626 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11630 } else if (cfg->compile_aot) {
11631 int const_reg = alloc_preg (cfg);
11632 int type_reg = alloc_preg (cfg);
11634 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11639 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11640 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11642 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11644 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11645 ins->type = STACK_VTYPE;
11646 ins->klass = mono_defaults.typed_reference_class;
11651 case CEE_LDTOKEN: {
11653 MonoClass *handle_class;
11655 CHECK_STACK_OVF (1);
11658 n = read32 (ip + 1);
11660 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11661 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11662 handle = mono_method_get_wrapper_data (method, n);
11663 handle_class = mono_method_get_wrapper_data (method, n + 1);
11664 if (handle_class == mono_defaults.typehandle_class)
11665 handle = &((MonoClass*)handle)->byval_arg;
11668 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11673 mono_class_init (handle_class);
11674 if (cfg->generic_sharing_context) {
11675 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11676 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11677 /* This case handles ldtoken
11678 of an open type, like for
11681 } else if (handle_class == mono_defaults.typehandle_class) {
11682 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11683 } else if (handle_class == mono_defaults.fieldhandle_class)
11684 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11685 else if (handle_class == mono_defaults.methodhandle_class)
11686 context_used = mini_method_check_context_used (cfg, handle);
11688 g_assert_not_reached ();
11691 if ((cfg->opt & MONO_OPT_SHARED) &&
11692 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11693 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11694 MonoInst *addr, *vtvar, *iargs [3];
11695 int method_context_used;
11697 method_context_used = mini_method_check_context_used (cfg, method);
11699 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11701 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11702 EMIT_NEW_ICONST (cfg, iargs [1], n);
11703 if (method_context_used) {
11704 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11705 method, MONO_RGCTX_INFO_METHOD);
11706 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11708 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11709 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11711 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11715 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11717 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11718 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11719 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11720 (cmethod->klass == mono_defaults.systemtype_class) &&
11721 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11722 MonoClass *tclass = mono_class_from_mono_type (handle);
11724 mono_class_init (tclass);
11725 if (context_used) {
11726 ins = emit_get_rgctx_klass (cfg, context_used,
11727 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11728 } else if (cfg->compile_aot) {
11729 if (method->wrapper_type) {
11730 mono_error_init (&error); //got to do it since there are multiple conditionals below
11731 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11732 /* Special case for static synchronized wrappers */
11733 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11735 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11736 /* FIXME: n is not a normal token */
11738 EMIT_NEW_PCONST (cfg, ins, NULL);
11741 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11744 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11746 ins->type = STACK_OBJ;
11747 ins->klass = cmethod->klass;
11750 MonoInst *addr, *vtvar;
11752 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11754 if (context_used) {
11755 if (handle_class == mono_defaults.typehandle_class) {
11756 ins = emit_get_rgctx_klass (cfg, context_used,
11757 mono_class_from_mono_type (handle),
11758 MONO_RGCTX_INFO_TYPE);
11759 } else if (handle_class == mono_defaults.methodhandle_class) {
11760 ins = emit_get_rgctx_method (cfg, context_used,
11761 handle, MONO_RGCTX_INFO_METHOD);
11762 } else if (handle_class == mono_defaults.fieldhandle_class) {
11763 ins = emit_get_rgctx_field (cfg, context_used,
11764 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11766 g_assert_not_reached ();
11768 } else if (cfg->compile_aot) {
11769 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11771 EMIT_NEW_PCONST (cfg, ins, handle);
11773 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11775 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11785 MONO_INST_NEW (cfg, ins, OP_THROW);
11787 ins->sreg1 = sp [0]->dreg;
11789 bblock->out_of_line = TRUE;
11790 MONO_ADD_INS (bblock, ins);
11791 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11792 MONO_ADD_INS (bblock, ins);
11795 link_bblock (cfg, bblock, end_bblock);
11796 start_new_bblock = 1;
11798 case CEE_ENDFINALLY:
11799 /* mono_save_seq_point_info () depends on this */
11800 if (sp != stack_start)
11801 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11802 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11803 MONO_ADD_INS (bblock, ins);
11805 start_new_bblock = 1;
11808 * Control will leave the method so empty the stack, otherwise
11809 * the next basic block will start with a nonempty stack.
11811 while (sp != stack_start) {
11816 case CEE_LEAVE_S: {
11819 if (*ip == CEE_LEAVE) {
11821 target = ip + 5 + (gint32)read32(ip + 1);
11824 target = ip + 2 + (signed char)(ip [1]);
11827 /* empty the stack */
11828 while (sp != stack_start) {
11833 * If this leave statement is in a catch block, check for a
11834 * pending exception, and rethrow it if necessary.
11835 * We avoid doing this in runtime invoke wrappers, since those are called
11836 * by native code which excepts the wrapper to catch all exceptions.
11838 for (i = 0; i < header->num_clauses; ++i) {
11839 MonoExceptionClause *clause = &header->clauses [i];
11842 * Use <= in the final comparison to handle clauses with multiple
11843 * leave statements, like in bug #78024.
11844 * The ordering of the exception clauses guarantees that we find the
11845 * innermost clause.
11847 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11849 MonoBasicBlock *dont_throw;
11854 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11857 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11859 NEW_BBLOCK (cfg, dont_throw);
11862 * Currently, we always rethrow the abort exception, despite the
11863 * fact that this is not correct. See thread6.cs for an example.
11864 * But propagating the abort exception is more important than
11865 * getting the sematics right.
11867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11868 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11869 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11871 MONO_START_BB (cfg, dont_throw);
11876 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11878 MonoExceptionClause *clause;
11880 for (tmp = handlers; tmp; tmp = tmp->next) {
11881 clause = tmp->data;
11882 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11884 link_bblock (cfg, bblock, tblock);
11885 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11886 ins->inst_target_bb = tblock;
11887 ins->inst_eh_block = clause;
11888 MONO_ADD_INS (bblock, ins);
11889 bblock->has_call_handler = 1;
11890 if (COMPILE_LLVM (cfg)) {
11891 MonoBasicBlock *target_bb;
11894 * Link the finally bblock with the target, since it will
11895 * conceptually branch there.
11896 * FIXME: Have to link the bblock containing the endfinally.
11898 GET_BBLOCK (cfg, target_bb, target);
11899 link_bblock (cfg, tblock, target_bb);
11902 g_list_free (handlers);
11905 MONO_INST_NEW (cfg, ins, OP_BR);
11906 MONO_ADD_INS (bblock, ins);
11907 GET_BBLOCK (cfg, tblock, target);
11908 link_bblock (cfg, bblock, tblock);
11909 ins->inst_target_bb = tblock;
11910 start_new_bblock = 1;
11912 if (*ip == CEE_LEAVE)
11921 * Mono specific opcodes
11923 case MONO_CUSTOM_PREFIX: {
11925 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11929 case CEE_MONO_ICALL: {
11931 MonoJitICallInfo *info;
11933 token = read32 (ip + 2);
11934 func = mono_method_get_wrapper_data (method, token);
11935 info = mono_find_jit_icall_by_addr (func);
11937 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11940 CHECK_STACK (info->sig->param_count);
11941 sp -= info->sig->param_count;
11943 ins = mono_emit_jit_icall (cfg, info->func, sp);
11944 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11948 inline_costs += 10 * num_calls++;
11952 case CEE_MONO_LDPTR_CARD_TABLE: {
11954 gpointer card_mask;
11955 CHECK_STACK_OVF (1);
11957 if (cfg->compile_aot)
11958 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11960 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
11964 inline_costs += 10 * num_calls++;
11967 case CEE_MONO_LDPTR_NURSERY_START: {
11970 CHECK_STACK_OVF (1);
11972 if (cfg->compile_aot)
11973 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11975 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
11979 inline_costs += 10 * num_calls++;
11982 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11983 CHECK_STACK_OVF (1);
11985 if (cfg->compile_aot)
11986 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11988 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
11992 inline_costs += 10 * num_calls++;
11995 case CEE_MONO_LDPTR: {
11998 CHECK_STACK_OVF (1);
12000 token = read32 (ip + 2);
12002 ptr = mono_method_get_wrapper_data (method, token);
12003 EMIT_NEW_PCONST (cfg, ins, ptr);
12006 inline_costs += 10 * num_calls++;
12007 /* Can't embed random pointers into AOT code */
12011 case CEE_MONO_JIT_ICALL_ADDR: {
12012 MonoJitICallInfo *callinfo;
12015 CHECK_STACK_OVF (1);
12017 token = read32 (ip + 2);
12019 ptr = mono_method_get_wrapper_data (method, token);
12020 callinfo = mono_find_jit_icall_by_addr (ptr);
12021 g_assert (callinfo);
12022 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12025 inline_costs += 10 * num_calls++;
12028 case CEE_MONO_ICALL_ADDR: {
12029 MonoMethod *cmethod;
12032 CHECK_STACK_OVF (1);
12034 token = read32 (ip + 2);
12036 cmethod = mono_method_get_wrapper_data (method, token);
12038 if (cfg->compile_aot) {
12039 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12041 ptr = mono_lookup_internal_call (cmethod);
12043 EMIT_NEW_PCONST (cfg, ins, ptr);
12049 case CEE_MONO_VTADDR: {
12050 MonoInst *src_var, *src;
12056 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12057 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12062 case CEE_MONO_NEWOBJ: {
12063 MonoInst *iargs [2];
12065 CHECK_STACK_OVF (1);
12067 token = read32 (ip + 2);
12068 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12069 mono_class_init (klass);
12070 NEW_DOMAINCONST (cfg, iargs [0]);
12071 MONO_ADD_INS (cfg->cbb, iargs [0]);
12072 NEW_CLASSCONST (cfg, iargs [1], klass);
12073 MONO_ADD_INS (cfg->cbb, iargs [1]);
12074 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12076 inline_costs += 10 * num_calls++;
12079 case CEE_MONO_OBJADDR:
12082 MONO_INST_NEW (cfg, ins, OP_MOVE);
12083 ins->dreg = alloc_ireg_mp (cfg);
12084 ins->sreg1 = sp [0]->dreg;
12085 ins->type = STACK_MP;
12086 MONO_ADD_INS (cfg->cbb, ins);
12090 case CEE_MONO_LDNATIVEOBJ:
12092 * Similar to LDOBJ, but instead load the unmanaged
12093 * representation of the vtype to the stack.
12098 token = read32 (ip + 2);
12099 klass = mono_method_get_wrapper_data (method, token);
12100 g_assert (klass->valuetype);
12101 mono_class_init (klass);
12104 MonoInst *src, *dest, *temp;
12107 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12108 temp->backend.is_pinvoke = 1;
12109 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12110 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12112 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12113 dest->type = STACK_VTYPE;
12114 dest->klass = klass;
12120 case CEE_MONO_RETOBJ: {
12122 * Same as RET, but return the native representation of a vtype
12125 g_assert (cfg->ret);
12126 g_assert (mono_method_signature (method)->pinvoke);
12131 token = read32 (ip + 2);
12132 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12134 if (!cfg->vret_addr) {
12135 g_assert (cfg->ret_var_is_local);
12137 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12139 EMIT_NEW_RETLOADA (cfg, ins);
12141 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12143 if (sp != stack_start)
12146 MONO_INST_NEW (cfg, ins, OP_BR);
12147 ins->inst_target_bb = end_bblock;
12148 MONO_ADD_INS (bblock, ins);
12149 link_bblock (cfg, bblock, end_bblock);
12150 start_new_bblock = 1;
12154 case CEE_MONO_CISINST:
12155 case CEE_MONO_CCASTCLASS: {
12160 token = read32 (ip + 2);
12161 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12162 if (ip [1] == CEE_MONO_CISINST)
12163 ins = handle_cisinst (cfg, klass, sp [0]);
12165 ins = handle_ccastclass (cfg, klass, sp [0]);
12171 case CEE_MONO_SAVE_LMF:
12172 case CEE_MONO_RESTORE_LMF:
12173 #ifdef MONO_ARCH_HAVE_LMF_OPS
12174 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12175 MONO_ADD_INS (bblock, ins);
12176 cfg->need_lmf_area = TRUE;
12180 case CEE_MONO_CLASSCONST:
12181 CHECK_STACK_OVF (1);
12183 token = read32 (ip + 2);
12184 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12187 inline_costs += 10 * num_calls++;
12189 case CEE_MONO_NOT_TAKEN:
12190 bblock->out_of_line = TRUE;
12193 case CEE_MONO_TLS: {
12196 CHECK_STACK_OVF (1);
12198 key = (gint32)read32 (ip + 2);
12199 g_assert (key < TLS_KEY_NUM);
12201 ins = mono_create_tls_get (cfg, key);
12203 if (cfg->compile_aot) {
12205 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12206 ins->dreg = alloc_preg (cfg);
12207 ins->type = STACK_PTR;
12209 g_assert_not_reached ();
12212 ins->type = STACK_PTR;
12213 MONO_ADD_INS (bblock, ins);
12218 case CEE_MONO_DYN_CALL: {
12219 MonoCallInst *call;
12221 /* It would be easier to call a trampoline, but that would put an
12222 * extra frame on the stack, confusing exception handling. So
12223 * implement it inline using an opcode for now.
12226 if (!cfg->dyn_call_var) {
12227 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12228 /* prevent it from being register allocated */
12229 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12232 /* Has to use a call inst since it local regalloc expects it */
12233 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12234 ins = (MonoInst*)call;
12236 ins->sreg1 = sp [0]->dreg;
12237 ins->sreg2 = sp [1]->dreg;
12238 MONO_ADD_INS (bblock, ins);
12240 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12243 inline_costs += 10 * num_calls++;
12247 case CEE_MONO_MEMORY_BARRIER: {
12249 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12253 case CEE_MONO_JIT_ATTACH: {
12254 MonoInst *args [16], *domain_ins;
12255 MonoInst *ad_ins, *jit_tls_ins;
12256 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12258 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12260 EMIT_NEW_PCONST (cfg, ins, NULL);
12261 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12263 ad_ins = mono_get_domain_intrinsic (cfg);
12264 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12266 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12267 NEW_BBLOCK (cfg, next_bb);
12268 NEW_BBLOCK (cfg, call_bb);
12270 if (cfg->compile_aot) {
12271 /* AOT code is only used in the root domain */
12272 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12274 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12276 MONO_ADD_INS (cfg->cbb, ad_ins);
12277 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12280 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12285 MONO_START_BB (cfg, call_bb);
12288 if (cfg->compile_aot) {
12289 /* AOT code is only used in the root domain */
12290 EMIT_NEW_PCONST (cfg, args [0], NULL);
12292 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12294 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12295 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12298 MONO_START_BB (cfg, next_bb);
12304 case CEE_MONO_JIT_DETACH: {
12305 MonoInst *args [16];
12307 /* Restore the original domain */
12308 dreg = alloc_ireg (cfg);
12309 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12310 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12315 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12321 case CEE_PREFIX1: {
12324 case CEE_ARGLIST: {
12325 /* somewhat similar to LDTOKEN */
12326 MonoInst *addr, *vtvar;
12327 CHECK_STACK_OVF (1);
12328 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12330 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12331 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12333 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12334 ins->type = STACK_VTYPE;
12335 ins->klass = mono_defaults.argumenthandle_class;
12345 MonoInst *cmp, *arg1, *arg2;
12353 * The following transforms:
12354 * CEE_CEQ into OP_CEQ
12355 * CEE_CGT into OP_CGT
12356 * CEE_CGT_UN into OP_CGT_UN
12357 * CEE_CLT into OP_CLT
12358 * CEE_CLT_UN into OP_CLT_UN
12360 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12362 MONO_INST_NEW (cfg, ins, cmp->opcode);
12363 cmp->sreg1 = arg1->dreg;
12364 cmp->sreg2 = arg2->dreg;
12365 type_from_op (cfg, cmp, arg1, arg2);
12367 add_widen_op (cfg, cmp, &arg1, &arg2);
12368 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12369 cmp->opcode = OP_LCOMPARE;
12370 else if (arg1->type == STACK_R4)
12371 cmp->opcode = OP_RCOMPARE;
12372 else if (arg1->type == STACK_R8)
12373 cmp->opcode = OP_FCOMPARE;
12375 cmp->opcode = OP_ICOMPARE;
12376 MONO_ADD_INS (bblock, cmp);
12377 ins->type = STACK_I4;
12378 ins->dreg = alloc_dreg (cfg, ins->type);
12379 type_from_op (cfg, ins, arg1, arg2);
12381 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12383 * The backends expect the fceq opcodes to do the
12386 ins->sreg1 = cmp->sreg1;
12387 ins->sreg2 = cmp->sreg2;
12390 MONO_ADD_INS (bblock, ins);
12396 MonoInst *argconst;
12397 MonoMethod *cil_method;
12399 CHECK_STACK_OVF (1);
12401 n = read32 (ip + 2);
12402 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12403 if (!cmethod || mono_loader_get_last_error ())
12405 mono_class_init (cmethod->klass);
12407 mono_save_token_info (cfg, image, n, cmethod);
12409 context_used = mini_method_check_context_used (cfg, cmethod);
12411 cil_method = cmethod;
12412 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12413 METHOD_ACCESS_FAILURE (method, cil_method);
12415 if (mono_security_core_clr_enabled ())
12416 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12419 * Optimize the common case of ldftn+delegate creation
12421 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12422 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12423 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12424 MonoInst *target_ins, *handle_ins;
12425 MonoMethod *invoke;
12426 int invoke_context_used;
12428 invoke = mono_get_delegate_invoke (ctor_method->klass);
12429 if (!invoke || !mono_method_signature (invoke))
12432 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12434 target_ins = sp [-1];
12436 if (mono_security_core_clr_enabled ())
12437 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12439 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12440 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12441 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12443 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12447 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12448 /* FIXME: SGEN support */
12449 if (invoke_context_used == 0) {
12451 if (cfg->verbose_level > 3)
12452 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12453 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12456 CHECK_CFG_EXCEPTION;
12467 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12468 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12472 inline_costs += 10 * num_calls++;
12475 case CEE_LDVIRTFTN: {
12476 MonoInst *args [2];
12480 n = read32 (ip + 2);
12481 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12482 if (!cmethod || mono_loader_get_last_error ())
12484 mono_class_init (cmethod->klass);
12486 context_used = mini_method_check_context_used (cfg, cmethod);
12488 if (mono_security_core_clr_enabled ())
12489 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12492 * Optimize the common case of ldvirtftn+delegate creation
12494 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12495 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12496 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12497 MonoInst *target_ins, *handle_ins;
12498 MonoMethod *invoke;
12499 int invoke_context_used;
12500 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12502 invoke = mono_get_delegate_invoke (ctor_method->klass);
12503 if (!invoke || !mono_method_signature (invoke))
12506 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12508 target_ins = sp [-1];
12510 if (mono_security_core_clr_enabled ())
12511 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12513 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12514 /* FIXME: SGEN support */
12515 if (invoke_context_used == 0) {
12517 if (cfg->verbose_level > 3)
12518 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12519 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12522 CHECK_CFG_EXCEPTION;
12536 args [1] = emit_get_rgctx_method (cfg, context_used,
12537 cmethod, MONO_RGCTX_INFO_METHOD);
12540 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12542 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12545 inline_costs += 10 * num_calls++;
12549 CHECK_STACK_OVF (1);
12551 n = read16 (ip + 2);
12553 EMIT_NEW_ARGLOAD (cfg, ins, n);
12558 CHECK_STACK_OVF (1);
12560 n = read16 (ip + 2);
12562 NEW_ARGLOADA (cfg, ins, n);
12563 MONO_ADD_INS (cfg->cbb, ins);
12571 n = read16 (ip + 2);
12573 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12575 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12579 CHECK_STACK_OVF (1);
12581 n = read16 (ip + 2);
12583 EMIT_NEW_LOCLOAD (cfg, ins, n);
12588 unsigned char *tmp_ip;
12589 CHECK_STACK_OVF (1);
12591 n = read16 (ip + 2);
12594 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12600 EMIT_NEW_LOCLOADA (cfg, ins, n);
12609 n = read16 (ip + 2);
12611 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12613 emit_stloc_ir (cfg, sp, header, n);
12620 if (sp != stack_start)
12622 if (cfg->method != method)
12624 * Inlining this into a loop in a parent could lead to
12625 * stack overflows which is different behavior than the
12626 * non-inlined case, thus disable inlining in this case.
12628 INLINE_FAILURE("localloc");
12630 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12631 ins->dreg = alloc_preg (cfg);
12632 ins->sreg1 = sp [0]->dreg;
12633 ins->type = STACK_PTR;
12634 MONO_ADD_INS (cfg->cbb, ins);
12636 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12638 ins->flags |= MONO_INST_INIT;
12643 case CEE_ENDFILTER: {
12644 MonoExceptionClause *clause, *nearest;
12649 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12651 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12652 ins->sreg1 = (*sp)->dreg;
12653 MONO_ADD_INS (bblock, ins);
12654 start_new_bblock = 1;
12658 for (cc = 0; cc < header->num_clauses; ++cc) {
12659 clause = &header->clauses [cc];
12660 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12661 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12662 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12665 g_assert (nearest);
12666 if ((ip - header->code) != nearest->handler_offset)
12671 case CEE_UNALIGNED_:
12672 ins_flag |= MONO_INST_UNALIGNED;
12673 /* FIXME: record alignment? we can assume 1 for now */
12677 case CEE_VOLATILE_:
12678 ins_flag |= MONO_INST_VOLATILE;
12682 ins_flag |= MONO_INST_TAILCALL;
12683 cfg->flags |= MONO_CFG_HAS_TAIL;
12684 /* Can't inline tail calls at this time */
12685 inline_costs += 100000;
12692 token = read32 (ip + 2);
12693 klass = mini_get_class (method, token, generic_context);
12694 CHECK_TYPELOAD (klass);
12695 if (generic_class_is_reference_type (cfg, klass))
12696 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12698 mini_emit_initobj (cfg, *sp, NULL, klass);
12702 case CEE_CONSTRAINED_:
12704 token = read32 (ip + 2);
12705 constrained_class = mini_get_class (method, token, generic_context);
12706 CHECK_TYPELOAD (constrained_class);
12710 case CEE_INITBLK: {
12711 MonoInst *iargs [3];
12715 /* Skip optimized paths for volatile operations. */
12716 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12717 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12718 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12719 /* emit_memset only works when val == 0 */
12720 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12723 iargs [0] = sp [0];
12724 iargs [1] = sp [1];
12725 iargs [2] = sp [2];
12726 if (ip [1] == CEE_CPBLK) {
12728 * FIXME: It's unclear whether we should be emitting both the acquire
12729 * and release barriers for cpblk. It is technically both a load and
12730 * store operation, so it seems like that's the sensible thing to do.
12732 * FIXME: We emit full barriers on both sides of the operation for
12733 * simplicity. We should have a separate atomic memcpy method instead.
12735 MonoMethod *memcpy_method = get_memcpy_method ();
12737 if (ins_flag & MONO_INST_VOLATILE)
12738 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12740 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12741 call->flags |= ins_flag;
12743 if (ins_flag & MONO_INST_VOLATILE)
12744 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12746 MonoMethod *memset_method = get_memset_method ();
12747 if (ins_flag & MONO_INST_VOLATILE) {
12748 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12749 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12751 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12752 call->flags |= ins_flag;
12763 ins_flag |= MONO_INST_NOTYPECHECK;
12765 ins_flag |= MONO_INST_NORANGECHECK;
12766 /* we ignore the no-nullcheck for now since we
12767 * really do it explicitly only when doing callvirt->call
12771 case CEE_RETHROW: {
12773 int handler_offset = -1;
12775 for (i = 0; i < header->num_clauses; ++i) {
12776 MonoExceptionClause *clause = &header->clauses [i];
12777 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12778 handler_offset = clause->handler_offset;
12783 bblock->flags |= BB_EXCEPTION_UNSAFE;
12785 if (handler_offset == -1)
12788 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12789 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12790 ins->sreg1 = load->dreg;
12791 MONO_ADD_INS (bblock, ins);
12793 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12794 MONO_ADD_INS (bblock, ins);
12797 link_bblock (cfg, bblock, end_bblock);
12798 start_new_bblock = 1;
12806 CHECK_STACK_OVF (1);
12808 token = read32 (ip + 2);
12809 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12810 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12813 val = mono_type_size (type, &ialign);
12815 MonoClass *klass = mini_get_class (method, token, generic_context);
12816 CHECK_TYPELOAD (klass);
12818 val = mono_type_size (&klass->byval_arg, &ialign);
12820 if (mini_is_gsharedvt_klass (cfg, klass))
12821 GSHAREDVT_FAILURE (*ip);
12823 EMIT_NEW_ICONST (cfg, ins, val);
12828 case CEE_REFANYTYPE: {
12829 MonoInst *src_var, *src;
12831 GSHAREDVT_FAILURE (*ip);
12837 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12839 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12840 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12841 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12846 case CEE_READONLY_:
12859 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12869 g_warning ("opcode 0x%02x not handled", *ip);
12873 if (start_new_bblock != 1)
12876 bblock->cil_length = ip - bblock->cil_code;
12877 if (bblock->next_bb) {
12878 /* This could already be set because of inlining, #693905 */
12879 MonoBasicBlock *bb = bblock;
12881 while (bb->next_bb)
12883 bb->next_bb = end_bblock;
12885 bblock->next_bb = end_bblock;
12888 if (cfg->method == method && cfg->domainvar) {
12890 MonoInst *get_domain;
12892 cfg->cbb = init_localsbb;
12894 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12895 MONO_ADD_INS (cfg->cbb, get_domain);
12897 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12899 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12900 MONO_ADD_INS (cfg->cbb, store);
12903 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12904 if (cfg->compile_aot)
12905 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12906 mono_get_got_var (cfg);
12909 if (cfg->method == method && cfg->got_var)
12910 mono_emit_load_got_addr (cfg);
12912 if (init_localsbb) {
12913 cfg->cbb = init_localsbb;
12915 for (i = 0; i < header->num_locals; ++i) {
12916 emit_init_local (cfg, i, header->locals [i], init_locals);
12920 if (cfg->init_ref_vars && cfg->method == method) {
12921 /* Emit initialization for ref vars */
12922 // FIXME: Avoid duplication initialization for IL locals.
12923 for (i = 0; i < cfg->num_varinfo; ++i) {
12924 MonoInst *ins = cfg->varinfo [i];
12926 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12927 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12931 if (cfg->lmf_var && cfg->method == method) {
12932 cfg->cbb = init_localsbb;
12933 emit_push_lmf (cfg);
12936 cfg->cbb = init_localsbb;
12937 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12940 MonoBasicBlock *bb;
12943 * Make seq points at backward branch targets interruptable.
12945 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12946 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12947 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12950 /* Add a sequence point for method entry/exit events */
12951 if (seq_points && cfg->gen_sdb_seq_points) {
12952 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12953 MONO_ADD_INS (init_localsbb, ins);
12954 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12955 MONO_ADD_INS (cfg->bb_exit, ins);
12959 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12960 * the code they refer to was dead (#11880).
12962 if (sym_seq_points) {
12963 for (i = 0; i < header->code_size; ++i) {
12964 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12967 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12968 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12975 if (cfg->method == method) {
12976 MonoBasicBlock *bb;
12977 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12978 bb->region = mono_find_block_region (cfg, bb->real_offset);
12980 mono_create_spvar_for_region (cfg, bb->region);
12981 if (cfg->verbose_level > 2)
12982 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12986 if (inline_costs < 0) {
12989 /* Method is too large */
12990 mname = mono_method_full_name (method, TRUE);
12991 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12992 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12996 if ((cfg->verbose_level > 2) && (cfg->method == method))
12997 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13002 g_assert (!mono_error_ok (&cfg->error));
13006 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13010 set_exception_type_from_invalid_il (cfg, method, ip);
13014 g_slist_free (class_inits);
13015 mono_basic_block_free (original_bb);
13016 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13017 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13018 if (cfg->exception_type)
13021 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the OP_STORE*_MEMBASE_IMM opcode corresponding to the given
 * OP_STORE*_MEMBASE_REG opcode, so a store whose source register holds a
 * known constant can be rewritten as a store-immediate.
 * Asserts if OPCODE has no immediate counterpart.
 */
13025 store_membase_reg_to_store_membase_imm (int opcode)
13028 case OP_STORE_MEMBASE_REG:
13029 return OP_STORE_MEMBASE_IMM;
13030 case OP_STOREI1_MEMBASE_REG:
13031 return OP_STOREI1_MEMBASE_IMM;
13032 case OP_STOREI2_MEMBASE_REG:
13033 return OP_STOREI2_MEMBASE_IMM;
13034 case OP_STOREI4_MEMBASE_REG:
13035 return OP_STOREI4_MEMBASE_IMM;
13036 case OP_STOREI8_MEMBASE_REG:
13037 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for this store opcode. */
13039 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the reg-imm variant of the reg-reg opcode OPCODE, i.e. the
 * opcode to use when the second operand is a compile-time constant
 * (e.g. OP_IADD -> OP_IADD_IMM).
 * NOTE(review): the case labels driving these returns are elided from
 * this view; the groupings below follow the returned opcode names.
 */
13046 mono_op_to_op_imm (int opcode)
/* 32 bit ALU ops */
13050 return OP_IADD_IMM;
13052 return OP_ISUB_IMM;
13054 return OP_IDIV_IMM;
13056 return OP_IDIV_UN_IMM;
13058 return OP_IREM_IMM;
13060 return OP_IREM_UN_IMM;
13062 return OP_IMUL_IMM;
13064 return OP_IAND_IMM;
13068 return OP_IXOR_IMM;
13070 return OP_ISHL_IMM;
13072 return OP_ISHR_IMM;
13074 return OP_ISHR_UN_IMM;
/* 64 bit ALU ops */
13077 return OP_LADD_IMM;
13079 return OP_LSUB_IMM;
13081 return OP_LAND_IMM;
13085 return OP_LXOR_IMM;
13087 return OP_LSHL_IMM;
13089 return OP_LSHR_IMM;
13091 return OP_LSHR_UN_IMM;
/* 64 bit rem only has an immediate form on 64 bit hosts */
13092 #if SIZEOF_REGISTER == 8
13094 return OP_LREM_IMM;
/* compares */
13098 return OP_COMPARE_IMM;
13100 return OP_ICOMPARE_IMM;
13102 return OP_LCOMPARE_IMM;
/* membase stores */
13104 case OP_STORE_MEMBASE_REG:
13105 return OP_STORE_MEMBASE_IMM;
13106 case OP_STOREI1_MEMBASE_REG:
13107 return OP_STOREI1_MEMBASE_IMM;
13108 case OP_STOREI2_MEMBASE_REG:
13109 return OP_STOREI2_MEMBASE_IMM;
13110 case OP_STOREI4_MEMBASE_REG:
13111 return OP_STOREI4_MEMBASE_IMM;
/* arch specific opcodes */
13113 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13115 return OP_X86_PUSH_IMM;
13116 case OP_X86_COMPARE_MEMBASE_REG:
13117 return OP_X86_COMPARE_MEMBASE_IMM;
13119 #if defined(TARGET_AMD64)
13120 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13121 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13123 case OP_VOIDCALL_REG:
13124 return OP_VOIDCALL;
13132 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the corresponding typed
 * OP_LOAD*_MEMBASE instruction. Asserts on opcodes with no mapping.
 */
13139 ldind_to_load_membase (int opcode)
13143 return OP_LOADI1_MEMBASE;
13145 return OP_LOADU1_MEMBASE;
13147 return OP_LOADI2_MEMBASE;
13149 return OP_LOADU2_MEMBASE;
13151 return OP_LOADI4_MEMBASE;
13153 return OP_LOADU4_MEMBASE;
13155 return OP_LOAD_MEMBASE;
/* object references use the pointer-sized load */
13156 case CEE_LDIND_REF:
13157 return OP_LOAD_MEMBASE;
13159 return OP_LOADI8_MEMBASE;
13161 return OP_LOADR4_MEMBASE;
13163 return OP_LOADR8_MEMBASE;
13165 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG instruction. Asserts on opcodes with no mapping.
 */
13172 stind_to_store_membase (int opcode)
13176 return OP_STOREI1_MEMBASE_REG;
13178 return OP_STOREI2_MEMBASE_REG;
13180 return OP_STOREI4_MEMBASE_REG;
/* object references use the pointer-sized store */
13182 case CEE_STIND_REF:
13183 return OP_STORE_MEMBASE_REG;
13185 return OP_STOREI8_MEMBASE_REG;
13187 return OP_STORER4_MEMBASE_REG;
13189 return OP_STORER8_MEMBASE_REG;
13191 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map an OP_LOAD*_MEMBASE opcode to the OP_LOAD*_MEM
 * variant which loads from an absolute address (used when the base
 * register turns out to be a constant).
 */
13198 mono_load_membase_to_load_mem (int opcode)
13200 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13201 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13203 case OP_LOAD_MEMBASE:
13204 return OP_LOAD_MEM;
13205 case OP_LOADU1_MEMBASE:
13206 return OP_LOADU1_MEM;
13207 case OP_LOADU2_MEMBASE:
13208 return OP_LOADU2_MEM;
13209 case OP_LOADI4_MEMBASE:
13210 return OP_LOADI4_MEM;
13211 case OP_LOADU4_MEMBASE:
13212 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64 bit hosts */
13213 #if SIZEOF_REGISTER == 8
13214 case OP_LOADI8_MEMBASE:
13215 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch-specific read-modify-write opcode which applies
 * OPCODE directly to a memory destination, given that the result was
 * stored back with STORE_OPCODE. Only word-sized stores (plus 8-byte
 * stores on amd64) are eligible; other store opcodes bail out early.
 */
13224 op_to_op_dest_membase (int store_opcode, int opcode)
13226 #if defined(TARGET_X86)
/* only pointer/int32-sized destinations can be fused on x86 */
13227 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13232 return OP_X86_ADD_MEMBASE_REG;
13234 return OP_X86_SUB_MEMBASE_REG;
13236 return OP_X86_AND_MEMBASE_REG;
13238 return OP_X86_OR_MEMBASE_REG;
13240 return OP_X86_XOR_MEMBASE_REG;
13243 return OP_X86_ADD_MEMBASE_IMM;
13246 return OP_X86_SUB_MEMBASE_IMM;
13249 return OP_X86_AND_MEMBASE_IMM;
13252 return OP_X86_OR_MEMBASE_IMM;
13255 return OP_X86_XOR_MEMBASE_IMM;
13261 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte destinations */
13262 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit forms reuse the X86_* opcodes */
13267 return OP_X86_ADD_MEMBASE_REG;
13269 return OP_X86_SUB_MEMBASE_REG;
13271 return OP_X86_AND_MEMBASE_REG;
13273 return OP_X86_OR_MEMBASE_REG;
13275 return OP_X86_XOR_MEMBASE_REG;
13277 return OP_X86_ADD_MEMBASE_IMM;
13279 return OP_X86_SUB_MEMBASE_IMM;
13281 return OP_X86_AND_MEMBASE_IMM;
13283 return OP_X86_OR_MEMBASE_IMM;
13285 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms */
13287 return OP_AMD64_ADD_MEMBASE_REG;
13289 return OP_AMD64_SUB_MEMBASE_REG;
13291 return OP_AMD64_AND_MEMBASE_REG;
13293 return OP_AMD64_OR_MEMBASE_REG;
13295 return OP_AMD64_XOR_MEMBASE_REG;
13298 return OP_AMD64_ADD_MEMBASE_IMM;
13301 return OP_AMD64_SUB_MEMBASE_IMM;
13304 return OP_AMD64_AND_MEMBASE_IMM;
13307 return OP_AMD64_OR_MEMBASE_IMM;
13310 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, fuse a setcc-style result (the visible cases return
 * SETEQ/SETNE forms) with a following byte store into a single
 * OP_X86_SET*_MEMBASE instruction. Only byte stores are eligible.
 */
13320 op_to_op_store_membase (int store_opcode, int opcode)
13322 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13325 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13326 return OP_X86_SETEQ_MEMBASE;
13328 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13329 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the FIRST source operand of OPCODE
 * into a single instruction which reads directly from memory
 * (push-from-membase / compare-with-membase forms on x86/amd64).
 * Incompatible load sizes bail out early.
 */
13337 op_to_op_src1_membase (int load_opcode, int opcode)
13340 /* FIXME: This has sign extension issues */
13342 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13343 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/int32-sized loads can be fused */
13346 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13351 return OP_X86_PUSH_MEMBASE;
13352 case OP_COMPARE_IMM:
13353 case OP_ICOMPARE_IMM:
13354 return OP_X86_COMPARE_MEMBASE_IMM;
13357 return OP_X86_COMPARE_MEMBASE_REG;
13361 #ifdef TARGET_AMD64
13362 /* FIXME: This has sign extension issues */
13364 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13365 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes, so 8-byte loads differ */
13370 #ifdef __mono_ilp32__
13371 if (load_opcode == OP_LOADI8_MEMBASE)
13373 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13375 return OP_X86_PUSH_MEMBASE;
13377 /* FIXME: This only works for 32 bit immediates
13378 case OP_COMPARE_IMM:
13379 case OP_LCOMPARE_IMM:
13380 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13381 return OP_AMD64_COMPARE_MEMBASE_IMM;
13383 case OP_ICOMPARE_IMM:
13384 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13385 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13389 #ifdef __mono_ilp32__
13390 if (load_opcode == OP_LOAD_MEMBASE)
13391 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13392 if (load_opcode == OP_LOADI8_MEMBASE)
13394 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13396 return OP_AMD64_COMPARE_MEMBASE_REG;
13399 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13400 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the SECOND source operand of
 * OPCODE into a single reg-with-membase instruction on x86/amd64.
 * Incompatible load sizes bail out early.
 */
13409 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32-sized loads can be fused */
13412 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13418 return OP_X86_COMPARE_REG_MEMBASE;
13420 return OP_X86_ADD_REG_MEMBASE;
13422 return OP_X86_SUB_REG_MEMBASE;
13424 return OP_X86_AND_REG_MEMBASE;
13426 return OP_X86_OR_REG_MEMBASE;
13428 return OP_X86_XOR_REG_MEMBASE;
13432 #ifdef TARGET_AMD64
/* under ILP32 (x32), OP_LOAD_MEMBASE is a 4-byte load */
13433 #ifdef __mono_ilp32__
13434 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13436 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit forms */
13440 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13442 return OP_X86_ADD_REG_MEMBASE;
13444 return OP_X86_SUB_REG_MEMBASE;
13446 return OP_X86_AND_REG_MEMBASE;
13448 return OP_X86_OR_REG_MEMBASE;
13450 return OP_X86_XOR_REG_MEMBASE;
13452 #ifdef __mono_ilp32__
13453 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13455 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit forms */
13460 return OP_AMD64_COMPARE_REG_MEMBASE;
13462 return OP_AMD64_ADD_REG_MEMBASE;
13464 return OP_AMD64_SUB_REG_MEMBASE;
13466 return OP_AMD64_AND_REG_MEMBASE;
13468 return OP_AMD64_OR_REG_MEMBASE;
13470 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes
 * which are emulated on this architecture (long shifts on 32 bit hosts
 * without native support, and mul/div when emulated), since the
 * emulation helpers need both operands in registers. The elided cases
 * between the #if blocks fall through without converting.
 */
13479 mono_op_to_op_imm_noemul (int opcode)
13482 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13488 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13495 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13500 return mono_op_to_op_imm (opcode);
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them, and demote variables used in only one bblock back into local
 * vregs so the local register allocator can handle them. Finally, compress
 * cfg->varinfo/cfg->vars so dead entries don't slow down liveness analysis.
 */
13511 mono_handle_global_vregs (MonoCompile *cfg)
13513 gint32 *vreg_to_bb;
13514 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg]: 0 = unseen, block_num + 1 = seen in exactly one bb,
 * -1 = seen in multiple bbs (made global).
 * NOTE(review): sizeof (gint32*) over-allocates (the element type is
 * gint32, not a pointer); harmless, but worth confirming before changing.
 */
13517 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13519 #ifdef MONO_ARCH_SIMD_INTRINSICS
13520 if (cfg->uses_simd_intrinsics)
13521 mono_simd_simplify_indirection (cfg);
13524 /* Find local vregs used in more than one bb */
13525 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13526 MonoInst *ins = bb->code;
13527 int block_num = bb->block_num;
13529 if (cfg->verbose_level > 2)
13530 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13533 for (; ins; ins = ins->next) {
13534 const char *spec = INS_INFO (ins->opcode);
13535 int regtype = 0, regindex;
13538 if (G_UNLIKELY (cfg->verbose_level > 2))
13539 mono_print_ins (ins);
13541 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dest, src1, src2 and src3 of each instruction in turn */
13543 for (regindex = 0; regindex < 4; regindex ++) {
13546 if (regindex == 0) {
13547 regtype = spec [MONO_INST_DEST];
13548 if (regtype == ' ')
13551 } else if (regindex == 1) {
13552 regtype = spec [MONO_INST_SRC1];
13553 if (regtype == ' ')
13556 } else if (regindex == 2) {
13557 regtype = spec [MONO_INST_SRC2];
13558 if (regtype == ' ')
13561 } else if (regindex == 3) {
13562 regtype = spec [MONO_INST_SRC3];
13563 if (regtype == ' ')
13568 #if SIZEOF_REGISTER == 4
13569 /* In the LLVM case, the long opcodes are not decomposed */
13570 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13572 * Since some instructions reference the original long vreg,
13573 * and some reference the two component vregs, it is quite hard
13574 * to determine when it needs to be global. So be conservative.
13576 if (!get_vreg_to_inst (cfg, vreg)) {
13577 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13579 if (cfg->verbose_level > 2)
13580 printf ("LONG VREG R%d made global.\n", vreg);
13584 * Make the component vregs volatile since the optimizations can
13585 * get confused otherwise.
13587 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13588 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13592 g_assert (vreg != -1);
13594 prev_bb = vreg_to_bb [vreg];
13595 if (prev_bb == 0) {
13596 /* 0 is a valid block num */
13597 vreg_to_bb [vreg] = block_num + 1;
13598 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are handled by the register allocator, not here */
13599 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13602 if (!get_vreg_to_inst (cfg, vreg)) {
13603 if (G_UNLIKELY (cfg->verbose_level > 2))
13604 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable whose type matches the vreg's regtype */
13608 if (vreg_is_ref (cfg, vreg))
13609 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13611 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13614 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13617 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13620 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13623 g_assert_not_reached ();
13627 /* Flag as having been used in more than one bb */
13628 vreg_to_bb [vreg] = -1;
13634 /* If a variable is used in only one bblock, convert it into a local vreg */
13635 for (i = 0; i < cfg->num_varinfo; i++) {
13636 MonoInst *var = cfg->varinfo [i];
13637 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13639 switch (var->type) {
13645 #if SIZEOF_REGISTER == 8
13648 #if !defined(TARGET_X86)
13649 /* Enabling this screws up the fp stack on x86 */
13652 if (mono_arch_is_soft_float ())
13655 /* Arguments are implicitly global */
13656 /* Putting R4 vars into registers doesn't work currently */
13657 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13658 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13660 * Make that the variable's liveness interval doesn't contain a call, since
13661 * that would cause the lvreg to be spilled, making the whole optimization
13664 /* This is too slow for JIT compilation */
13666 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13668 int def_index, call_index, ins_index;
13669 gboolean spilled = FALSE;
13674 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13675 const char *spec = INS_INFO (ins->opcode);
13677 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13678 def_index = ins_index;
/*
 * FIX: the second disjunct previously re-tested SRC1/sreg1 (a
 * copy-paste bug), so uses of the variable through the second
 * source register were never detected here.
 */
13680 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13681 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
13682 if (call_index > def_index) {
13688 if (MONO_IS_CALL (ins))
13689 call_index = ins_index;
13699 if (G_UNLIKELY (cfg->verbose_level > 2))
13700 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: the local register allocator will handle this vreg */
13701 var->flags |= MONO_INST_IS_DEAD;
13702 cfg->vreg_to_inst [var->dreg] = NULL;
13709 * Compress the varinfo and vars tables so the liveness computation is faster and
13710 * takes up less space.
13713 for (i = 0; i < cfg->num_varinfo; ++i) {
13714 MonoInst *var = cfg->varinfo [i];
13715 if (pos < i && cfg->locals_start == i)
13716 cfg->locals_start = pos;
13717 if (!(var->flags & MONO_INST_IS_DEAD)) {
13719 cfg->varinfo [pos] = cfg->varinfo [i];
13720 cfg->varinfo [pos]->inst_c0 = pos;
13721 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13722 cfg->vars [pos].idx = pos;
13723 #if SIZEOF_REGISTER == 4
13724 if (cfg->varinfo [pos]->type == STACK_I8) {
13725 /* Modify the two component vars too */
13728 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13729 var1->inst_c0 = pos;
13730 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13731 var1->inst_c0 = pos;
13738 cfg->num_varinfo = pos;
13739 if (cfg->locals_start > cfg->num_varinfo)
13740 cfg->locals_start = cfg->num_varinfo;
13744 * mono_spill_global_vars:
13746 * Generate spill code for variables which are not allocated to registers,
13747 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13748 * code is generated which could be optimized by the local optimization passes.
13751 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13753 MonoBasicBlock *bb;
13755 int orig_next_vreg;
13756 guint32 *vreg_to_lvreg;
13758 guint32 i, lvregs_len;
13759 gboolean dest_has_lvreg = FALSE;
13760 guint32 stacktypes [128];
13761 MonoInst **live_range_start, **live_range_end;
13762 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13763 int *gsharedvt_vreg_to_idx = NULL;
13765 *need_local_opts = FALSE;
13767 memset (spec2, 0, sizeof (spec2));
13769 /* FIXME: Move this function to mini.c */
13770 stacktypes ['i'] = STACK_PTR;
13771 stacktypes ['l'] = STACK_I8;
13772 stacktypes ['f'] = STACK_R8;
13773 #ifdef MONO_ARCH_SIMD_INTRINSICS
13774 stacktypes ['x'] = STACK_VTYPE;
13777 #if SIZEOF_REGISTER == 4
13778 /* Create MonoInsts for longs */
13779 for (i = 0; i < cfg->num_varinfo; i++) {
13780 MonoInst *ins = cfg->varinfo [i];
13782 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13783 switch (ins->type) {
13788 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13791 g_assert (ins->opcode == OP_REGOFFSET);
13793 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13795 tree->opcode = OP_REGOFFSET;
13796 tree->inst_basereg = ins->inst_basereg;
13797 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13799 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13801 tree->opcode = OP_REGOFFSET;
13802 tree->inst_basereg = ins->inst_basereg;
13803 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13813 if (cfg->compute_gc_maps) {
13814 /* registers need liveness info even for !non refs */
13815 for (i = 0; i < cfg->num_varinfo; i++) {
13816 MonoInst *ins = cfg->varinfo [i];
13818 if (ins->opcode == OP_REGVAR)
13819 ins->flags |= MONO_INST_GC_TRACK;
13823 if (cfg->gsharedvt) {
13824 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13826 for (i = 0; i < cfg->num_varinfo; ++i) {
13827 MonoInst *ins = cfg->varinfo [i];
13830 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13831 if (i >= cfg->locals_start) {
13833 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13834 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13835 ins->opcode = OP_GSHAREDVT_LOCAL;
13836 ins->inst_imm = idx;
13839 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13840 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13846 /* FIXME: widening and truncation */
13849 * As an optimization, when a variable allocated to the stack is first loaded into
13850 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13851 * the variable again.
13853 orig_next_vreg = cfg->next_vreg;
13854 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13855 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13859 * These arrays contain the first and last instructions accessing a given
13861 * Since we emit bblocks in the same order we process them here, and we
13862 * don't split live ranges, these will precisely describe the live range of
13863 * the variable, i.e. the instruction range where a valid value can be found
13864 * in the variable's location.
13865 * The live range is computed using the liveness info computed by the liveness pass.
13866 * We can't use vmv->range, since that is an abstract live range, and we need
13867 * one which is instruction precise.
13868 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13870 /* FIXME: Only do this if debugging info is requested */
13871 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13872 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13873 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13874 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13876 /* Add spill loads/stores */
13877 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13880 if (cfg->verbose_level > 2)
13881 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13883 /* Clear vreg_to_lvreg array */
13884 for (i = 0; i < lvregs_len; i++)
13885 vreg_to_lvreg [lvregs [i]] = 0;
13889 MONO_BB_FOR_EACH_INS (bb, ins) {
13890 const char *spec = INS_INFO (ins->opcode);
13891 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13892 gboolean store, no_lvreg;
13893 int sregs [MONO_MAX_SRC_REGS];
13895 if (G_UNLIKELY (cfg->verbose_level > 2))
13896 mono_print_ins (ins);
13898 if (ins->opcode == OP_NOP)
13902 * We handle LDADDR here as well, since it can only be decomposed
13903 * when variable addresses are known.
13905 if (ins->opcode == OP_LDADDR) {
13906 MonoInst *var = ins->inst_p0;
13908 if (var->opcode == OP_VTARG_ADDR) {
13909 /* Happens on SPARC/S390 where vtypes are passed by reference */
13910 MonoInst *vtaddr = var->inst_left;
13911 if (vtaddr->opcode == OP_REGVAR) {
13912 ins->opcode = OP_MOVE;
13913 ins->sreg1 = vtaddr->dreg;
13915 else if (var->inst_left->opcode == OP_REGOFFSET) {
13916 ins->opcode = OP_LOAD_MEMBASE;
13917 ins->inst_basereg = vtaddr->inst_basereg;
13918 ins->inst_offset = vtaddr->inst_offset;
13921 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13922 /* gsharedvt arg passed by ref */
13923 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13925 ins->opcode = OP_LOAD_MEMBASE;
13926 ins->inst_basereg = var->inst_basereg;
13927 ins->inst_offset = var->inst_offset;
13928 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13929 MonoInst *load, *load2, *load3;
13930 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13931 int reg1, reg2, reg3;
13932 MonoInst *info_var = cfg->gsharedvt_info_var;
13933 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13937 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13940 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13942 g_assert (info_var);
13943 g_assert (locals_var);
13945 /* Mark the instruction used to compute the locals var as used */
13946 cfg->gsharedvt_locals_var_ins = NULL;
13948 /* Load the offset */
13949 if (info_var->opcode == OP_REGOFFSET) {
13950 reg1 = alloc_ireg (cfg);
13951 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13952 } else if (info_var->opcode == OP_REGVAR) {
13954 reg1 = info_var->dreg;
13956 g_assert_not_reached ();
13958 reg2 = alloc_ireg (cfg);
13959 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13960 /* Load the locals area address */
13961 reg3 = alloc_ireg (cfg);
13962 if (locals_var->opcode == OP_REGOFFSET) {
13963 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13964 } else if (locals_var->opcode == OP_REGVAR) {
13965 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13967 g_assert_not_reached ();
13969 /* Compute the address */
13970 ins->opcode = OP_PADD;
13974 mono_bblock_insert_before_ins (bb, ins, load3);
13975 mono_bblock_insert_before_ins (bb, load3, load2);
13977 mono_bblock_insert_before_ins (bb, load2, load);
13979 g_assert (var->opcode == OP_REGOFFSET);
13981 ins->opcode = OP_ADD_IMM;
13982 ins->sreg1 = var->inst_basereg;
13983 ins->inst_imm = var->inst_offset;
13986 *need_local_opts = TRUE;
13987 spec = INS_INFO (ins->opcode);
13990 if (ins->opcode < MONO_CEE_LAST) {
13991 mono_print_ins (ins);
13992 g_assert_not_reached ();
13996 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14000 if (MONO_IS_STORE_MEMBASE (ins)) {
14001 tmp_reg = ins->dreg;
14002 ins->dreg = ins->sreg2;
14003 ins->sreg2 = tmp_reg;
14006 spec2 [MONO_INST_DEST] = ' ';
14007 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14008 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14009 spec2 [MONO_INST_SRC3] = ' ';
14011 } else if (MONO_IS_STORE_MEMINDEX (ins))
14012 g_assert_not_reached ();
14017 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14018 printf ("\t %.3s %d", spec, ins->dreg);
14019 num_sregs = mono_inst_get_src_registers (ins, sregs);
14020 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14021 printf (" %d", sregs [srcindex]);
14028 regtype = spec [MONO_INST_DEST];
14029 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14032 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14033 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14034 MonoInst *store_ins;
14036 MonoInst *def_ins = ins;
14037 int dreg = ins->dreg; /* The original vreg */
14039 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14041 if (var->opcode == OP_REGVAR) {
14042 ins->dreg = var->dreg;
14043 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14045 * Instead of emitting a load+store, use a _membase opcode.
14047 g_assert (var->opcode == OP_REGOFFSET);
14048 if (ins->opcode == OP_MOVE) {
14052 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14053 ins->inst_basereg = var->inst_basereg;
14054 ins->inst_offset = var->inst_offset;
14057 spec = INS_INFO (ins->opcode);
14061 g_assert (var->opcode == OP_REGOFFSET);
14063 prev_dreg = ins->dreg;
14065 /* Invalidate any previous lvreg for this vreg */
14066 vreg_to_lvreg [ins->dreg] = 0;
14070 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14072 store_opcode = OP_STOREI8_MEMBASE_REG;
14075 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14077 #if SIZEOF_REGISTER != 8
14078 if (regtype == 'l') {
14079 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14080 mono_bblock_insert_after_ins (bb, ins, store_ins);
14081 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14082 mono_bblock_insert_after_ins (bb, ins, store_ins);
14083 def_ins = store_ins;
14088 g_assert (store_opcode != OP_STOREV_MEMBASE);
14090 /* Try to fuse the store into the instruction itself */
14091 /* FIXME: Add more instructions */
14092 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14093 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14094 ins->inst_imm = ins->inst_c0;
14095 ins->inst_destbasereg = var->inst_basereg;
14096 ins->inst_offset = var->inst_offset;
14097 spec = INS_INFO (ins->opcode);
14098 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14099 ins->opcode = store_opcode;
14100 ins->inst_destbasereg = var->inst_basereg;
14101 ins->inst_offset = var->inst_offset;
14105 tmp_reg = ins->dreg;
14106 ins->dreg = ins->sreg2;
14107 ins->sreg2 = tmp_reg;
14110 spec2 [MONO_INST_DEST] = ' ';
14111 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14112 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14113 spec2 [MONO_INST_SRC3] = ' ';
14115 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14116 // FIXME: The backends expect the base reg to be in inst_basereg
14117 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14119 ins->inst_basereg = var->inst_basereg;
14120 ins->inst_offset = var->inst_offset;
14121 spec = INS_INFO (ins->opcode);
14123 /* printf ("INS: "); mono_print_ins (ins); */
14124 /* Create a store instruction */
14125 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14127 /* Insert it after the instruction */
14128 mono_bblock_insert_after_ins (bb, ins, store_ins);
14130 def_ins = store_ins;
14133 * We can't assign ins->dreg to var->dreg here, since the
14134 * sregs could use it. So set a flag, and do it after
14137 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14138 dest_has_lvreg = TRUE;
14143 if (def_ins && !live_range_start [dreg]) {
14144 live_range_start [dreg] = def_ins;
14145 live_range_start_bb [dreg] = bb;
14148 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14151 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14152 tmp->inst_c1 = dreg;
14153 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14160 num_sregs = mono_inst_get_src_registers (ins, sregs);
14161 for (srcindex = 0; srcindex < 3; ++srcindex) {
14162 regtype = spec [MONO_INST_SRC1 + srcindex];
14163 sreg = sregs [srcindex];
14165 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14166 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14167 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14168 MonoInst *use_ins = ins;
14169 MonoInst *load_ins;
14170 guint32 load_opcode;
14172 if (var->opcode == OP_REGVAR) {
14173 sregs [srcindex] = var->dreg;
14174 //mono_inst_set_src_registers (ins, sregs);
14175 live_range_end [sreg] = use_ins;
14176 live_range_end_bb [sreg] = bb;
14178 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14181 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14182 /* var->dreg is a hreg */
14183 tmp->inst_c1 = sreg;
14184 mono_bblock_insert_after_ins (bb, ins, tmp);
14190 g_assert (var->opcode == OP_REGOFFSET);
14192 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14194 g_assert (load_opcode != OP_LOADV_MEMBASE);
14196 if (vreg_to_lvreg [sreg]) {
14197 g_assert (vreg_to_lvreg [sreg] != -1);
14199 /* The variable is already loaded to an lvreg */
14200 if (G_UNLIKELY (cfg->verbose_level > 2))
14201 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14202 sregs [srcindex] = vreg_to_lvreg [sreg];
14203 //mono_inst_set_src_registers (ins, sregs);
14207 /* Try to fuse the load into the instruction */
14208 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14209 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14210 sregs [0] = var->inst_basereg;
14211 //mono_inst_set_src_registers (ins, sregs);
14212 ins->inst_offset = var->inst_offset;
14213 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14214 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14215 sregs [1] = var->inst_basereg;
14216 //mono_inst_set_src_registers (ins, sregs);
14217 ins->inst_offset = var->inst_offset;
14219 if (MONO_IS_REAL_MOVE (ins)) {
14220 ins->opcode = OP_NOP;
14223 //printf ("%d ", srcindex); mono_print_ins (ins);
14225 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14227 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14228 if (var->dreg == prev_dreg) {
14230 * sreg refers to the value loaded by the load
14231 * emitted below, but we need to use ins->dreg
14232 * since it refers to the store emitted earlier.
14236 g_assert (sreg != -1);
14237 vreg_to_lvreg [var->dreg] = sreg;
14238 g_assert (lvregs_len < 1024);
14239 lvregs [lvregs_len ++] = var->dreg;
14243 sregs [srcindex] = sreg;
14244 //mono_inst_set_src_registers (ins, sregs);
14246 #if SIZEOF_REGISTER != 8
14247 if (regtype == 'l') {
14248 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14249 mono_bblock_insert_before_ins (bb, ins, load_ins);
14250 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14251 mono_bblock_insert_before_ins (bb, ins, load_ins);
14252 use_ins = load_ins;
14257 #if SIZEOF_REGISTER == 4
14258 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14260 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14261 mono_bblock_insert_before_ins (bb, ins, load_ins);
14262 use_ins = load_ins;
14266 if (var->dreg < orig_next_vreg) {
14267 live_range_end [var->dreg] = use_ins;
14268 live_range_end_bb [var->dreg] = bb;
14271 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14274 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14275 tmp->inst_c1 = var->dreg;
14276 mono_bblock_insert_after_ins (bb, ins, tmp);
14280 mono_inst_set_src_registers (ins, sregs);
14282 if (dest_has_lvreg) {
14283 g_assert (ins->dreg != -1);
14284 vreg_to_lvreg [prev_dreg] = ins->dreg;
14285 g_assert (lvregs_len < 1024);
14286 lvregs [lvregs_len ++] = prev_dreg;
14287 dest_has_lvreg = FALSE;
14291 tmp_reg = ins->dreg;
14292 ins->dreg = ins->sreg2;
14293 ins->sreg2 = tmp_reg;
14296 if (MONO_IS_CALL (ins)) {
14297 /* Clear vreg_to_lvreg array */
14298 for (i = 0; i < lvregs_len; i++)
14299 vreg_to_lvreg [lvregs [i]] = 0;
14301 } else if (ins->opcode == OP_NOP) {
14303 MONO_INST_NULLIFY_SREGS (ins);
14306 if (cfg->verbose_level > 2)
14307 mono_print_ins_index (1, ins);
14310 /* Extend the live range based on the liveness info */
14311 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14312 for (i = 0; i < cfg->num_varinfo; i ++) {
14313 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14315 if (vreg_is_volatile (cfg, vi->vreg))
14316 /* The liveness info is incomplete */
14319 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14320 /* Live from at least the first ins of this bb */
14321 live_range_start [vi->vreg] = bb->code;
14322 live_range_start_bb [vi->vreg] = bb;
14325 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14326 /* Live at least until the last ins of this bb */
14327 live_range_end [vi->vreg] = bb->last_ins;
14328 live_range_end_bb [vi->vreg] = bb;
14334 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14336 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14337 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14339 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14340 for (i = 0; i < cfg->num_varinfo; ++i) {
14341 int vreg = MONO_VARINFO (cfg, i)->vreg;
14344 if (live_range_start [vreg]) {
14345 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14347 ins->inst_c1 = vreg;
14348 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14350 if (live_range_end [vreg]) {
14351 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14353 ins->inst_c1 = vreg;
14354 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14355 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14357 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14363 if (cfg->gsharedvt_locals_var_ins) {
14364 /* Nullify if unused */
14365 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14366 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14369 g_free (live_range_start);
14370 g_free (live_range_end);
14371 g_free (live_range_start_bb);
14372 g_free (live_range_end_bb);
14377 * - use 'iadd' instead of 'int_add'
14378 * - handling ovf opcodes: decompose in method_to_ir.
14379 * - unify iregs/fregs
14380 * -> partly done, the missing parts are:
14381 * - a more complete unification would involve unifying the hregs as well, so
14382 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14383 * would no longer map to the machine hregs, so the code generators would need to
14384 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14385 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14386 * fp/non-fp branches speeds it up by about 15%.
14387 * - use sext/zext opcodes instead of shifts
14389 * - get rid of TEMPLOADs if possible and use vregs instead
14390 * - clean up usage of OP_P/OP_ opcodes
14391 * - cleanup usage of DUMMY_USE
14392 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14394 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14395 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14396 * - make sure handle_stack_args () is called before the branch is emitted
14397 * - when the new IR is done, get rid of all unused stuff
14398 * - COMPARE/BEQ as separate instructions or unify them ?
14399 * - keeping them separate allows specialized compare instructions like
14400 * compare_imm, compare_membase
14401 * - most back ends unify fp compare+branch, fp compare+ceq
14402 * - integrate mono_save_args into inline_method
14403 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14404 * - handle long shift opts on 32 bit platforms somehow: they require
14405 * 3 sregs (2 for arg1 and 1 for arg2)
14406 * - make byref a 'normal' type.
14407 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14408 * variable if needed.
14409 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14410 * like inline_method.
14411 * - remove inlining restrictions
14412 * - fix LNEG and enable cfold of INEG
14413 * - generalize x86 optimizations like ldelema as a peephole optimization
14414 * - add store_mem_imm for amd64
14415 * - optimize the loading of the interruption flag in the managed->native wrappers
14416 * - avoid special handling of OP_NOP in passes
14417 * - move code inserting instructions into one function/macro.
14418 * - try a coalescing phase after liveness analysis
14419 * - add float -> vreg conversion + local optimizations on !x86
14420 * - figure out how to handle decomposed branches during optimizations, ie.
14421 * compare+branch, op_jump_table+op_br etc.
14422 * - promote RuntimeXHandles to vregs
14423 * - vtype cleanups:
14424 * - add a NEW_VARLOADA_VREG macro
14425 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14426 * accessing vtype fields.
14427 * - get rid of I8CONST on 64 bit platforms
14428 * - dealing with the increase in code size due to branches created during opcode
14430 * - use extended basic blocks
14431 * - all parts of the JIT
14432 * - handle_global_vregs () && local regalloc
14433 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14434 * - sources of increase in code size:
14437 * - isinst and castclass
14438 * - lvregs not allocated to global registers even if used multiple times
14439 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14441 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14442 * - add all micro optimizations from the old JIT
14443 * - put tree optimizations into the deadce pass
14444 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14445 * specific function.
14446 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14447 * fcompare + branchCC.
14448 * - create a helper function for allocating a stack slot, taking into account
14449 * MONO_CFG_HAS_SPILLUP.
14451 * - merge the ia64 switch changes.
14452 * - optimize mono_regstate2_alloc_int/float.
14453 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14454 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14455 * parts of the tree could be separated by other instructions, killing the tree
14456 * arguments, or stores killing loads etc. Also, should we fold loads into other
14457 * instructions if the result of the load is used multiple times ?
14458 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14459 * - LAST MERGE: 108395.
14460 * - when returning vtypes in registers, generate IR and append it to the end of the
14461 * last bb instead of doing it in the epilog.
14462 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14470 - When to decompose opcodes:
14471 - earlier: this makes some optimizations hard to implement, since the low level IR
14472 no longer contains the necessary information. But it is easier to do.
14473 - later: harder to implement, enables more optimizations.
14474 - Branches inside bblocks:
14475 - created when decomposing complex opcodes.
14476 - branches to another bblock: harmless, but not tracked by the branch
14477 optimizations, so need to branch to a label at the start of the bblock.
14478 - branches to inside the same bblock: very problematic, trips up the local
14479 reg allocator. Can be fixed by splitting the current bblock, but that is a
14480 complex operation, since some local vregs can become global vregs etc.
14481 - Local/global vregs:
14482 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14483 local register allocator.
14484 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14485 structure, created by mono_create_var (). Assigned to hregs or the stack by
14486 the global register allocator.
14487 - When to do optimizations like alu->alu_imm:
14488 - earlier -> saves work later on since the IR will be smaller/simpler
14489 - later -> can work on more instructions
14490 - Handling of valuetypes:
14491 - When a vtype is pushed on the stack, a new temporary is created, an
14492 instruction computing its address (LDADDR) is emitted and pushed on
14493 the stack. Need to optimize cases when the vtype is used immediately as in
14494 argument passing, stloc etc.
14495 - Instead of the to_end stuff in the old JIT, simply call the function handling
14496 the values on the stack before emitting the last instruction of the bb.
14499 #endif /* DISABLE_JIT */