2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
/*
 * Failure-reporting macros.  Each records an exception type on the current
 * MonoCompile ('cfg' is an implicit argument) and aborts the IR build via
 * 'goto exception_exit' ('goto mono_error_exit' for CHECK_CFG_ERROR).
 * NOTE(review): the closing "} while (0)" terminators are not visible in
 * this excerpt -- the macro bodies look truncated; confirm against the
 * full file.
 */
77 /* These have 'cfg' as an implicit argument */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; \
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whether 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
149 /* helper methods signatures */
/* Cached icall signatures for the trampoline helpers; filled in once by
 * mono_create_helper_signatures () below. */
150 static MonoMethodSignature *helper_sig_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
153 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
154 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
156 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
157 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
160 * Instruction metadata
168 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
169 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
175 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
180 /* keep in sync with the enum in mini.h */
/* First expansion of mini-ops.h: per-opcode dest/src register-class table. */
183 #include "mini-ops.h"
188 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
189 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
191 * This should contain the index of the last sreg + 1. This is not the same
192 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
194 const gint8 ins_sreg_counts[] = {
/* Second expansion: count of source registers each opcode uses. */
195 #include "mini-ops.h"
200 #define MONO_INIT_VARINFO(vi,id) do { \
201 (vi)->range.first_use.pos.bid = 0xffff; \
207 mono_alloc_ireg (MonoCompile *cfg)
209 return alloc_ireg (cfg);
/* Allocate a fresh long (64-bit) vreg. */
213 mono_alloc_lreg (MonoCompile *cfg)
215 return alloc_lreg (cfg);
/* Allocate a fresh floating-point vreg. */
219 mono_alloc_freg (MonoCompile *cfg)
221 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
225 mono_alloc_preg (MonoCompile *cfg)
227 return alloc_preg (cfg);
/* Allocate a destination vreg suited to the given eval-stack type. */
231 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
233 return alloc_dreg (cfg, stack_type);
237 * mono_alloc_ireg_ref:
239 * Allocate an IREG, and mark it as holding a GC ref.
242 mono_alloc_ireg_ref (MonoCompile *cfg)
244 return alloc_ireg_ref (cfg);
248 * mono_alloc_ireg_mp:
250 * Allocate an IREG, and mark it as holding a managed pointer.
253 mono_alloc_ireg_mp (MonoCompile *cfg)
255 return alloc_ireg_mp (cfg);
259 * mono_alloc_ireg_copy:
261 * Allocate an IREG with the same GC type as VREG.
264 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
266 if (vreg_is_ref (cfg, vreg))
267 return alloc_ireg_ref (cfg);
268 else if (vreg_is_mp (cfg, vreg))
269 return alloc_ireg_mp (cfg);
271 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Map a MonoType to the move opcode used to copy a value of that type
 *   between vregs (e.g. OP_FMOVE/OP_RMOVE for floating point).
 *   NOTE(review): several case labels and return statements are missing
 *   from this excerpt; the visible switch is incomplete.
 */
275 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
280 type = mini_get_underlying_type (cfg, type);
282 switch (type->type) {
295 case MONO_TYPE_FNPTR:
297 case MONO_TYPE_CLASS:
298 case MONO_TYPE_STRING:
299 case MONO_TYPE_OBJECT:
300 case MONO_TYPE_SZARRAY:
301 case MONO_TYPE_ARRAY:
305 #if SIZEOF_REGISTER == 8
311 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
314 case MONO_TYPE_VALUETYPE:
/* enums decay to their underlying base type */
315 if (type->data.klass->enumtype) {
316 type = mono_class_enum_basetype (type->data.klass);
319 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
322 case MONO_TYPE_TYPEDBYREF:
324 case MONO_TYPE_GENERICINST:
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->generic_sharing_context);
330 if (mini_type_var_is_vt (cfg, type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and its instructions. */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
346 printf ("\n%s %d: [IN: ", msg, bb->block_num);
347 for (i = 0; i < bb->in_count; ++i)
348 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 for (i = 0; i < bb->out_count; ++i)
351 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 for (tree = bb->code; tree; tree = tree->next)
354 mono_print_ins_index (-1, tree);
/* Build the cached icall signatures declared above.  The strings are icall
 * signature descriptions: return type followed by parameter types. */
358 mono_create_helper_signatures (void)
360 helper_sig_domain_get = mono_create_icall_signature ("ptr");
361 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
362 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
364 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
365 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
366 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
367 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Debugger hook: trap here when the 'break-on-unverified' option is set. */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
/* Record a method-access failure (MONO_EXCEPTION_METHOD_ACCESS) with a
 * descriptive message built from the two method names. */
377 static MONO_NEVER_INLINE void
378 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
383 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
384 g_free (method_fname);
385 g_free (cil_method_fname);
/* Record a field-access failure (MONO_EXCEPTION_FIELD_ACCESS) with a
 * descriptive message built from the field and method names. */
388 static MONO_NEVER_INLINE void
389 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
391 char *method_fname = mono_method_full_name (method, TRUE);
392 char *field_fname = mono_field_full_name (field);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
394 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
395 g_free (method_fname);
396 g_free (field_fname);
/* Mark the compile as a failed inline attempt; traces at verbose >= 2. */
399 static MONO_NEVER_INLINE void
400 inline_failure (MonoCompile *cfg, const char *msg)
402 if (cfg->verbose_level >= 2)
403 printf ("inline failed: %s\n", msg);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 *   Record a generic-sharing failure (MONO_EXCEPTION_GENERIC_SHARING_FAILED)
 *   for the method currently being compiled, tracing it when
 *   verbose_level > 2.  The 'file' parameter is currently unused; only
 *   'line' appears in the trace output.
 *   Fix: removed the stray macro-style line-continuation backslash after
 *   the 'if' condition (leftover from a macro-to-function conversion).
 */
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 if (cfg->verbose_level > 2)
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt failure with a descriptive message; the method is
 * then recompiled as a concrete instantiation by the caller. */
415 static MONO_NEVER_INLINE void
416 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
418 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
419 if (cfg->verbose_level >= 2)
420 printf ("%s\n", cfg->exception_message);
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instantiations might be verifiable and some might not be. i.e.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
461 /* Emit conversions so both operands of a binary opcode are of the same type */
463 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1_ref, MonoInst **arg2_ref)
465 MonoInst *arg1 = *arg1_ref;
466 MonoInst *arg2 = *arg2_ref;
469 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
470 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
473 /* Mixing r4/r8 is allowed by the spec */
474 if (arg1->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
478 conv->type = STACK_R8;
482 if (arg2->type == STACK_R4) {
483 int dreg = alloc_freg (cfg);
485 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
486 conv->type = STACK_R8;
/* 64-bit only: widen an i4 operand mixed with a native-sized value */
492 #if SIZEOF_REGISTER == 8
493 /* FIXME: Need to add many more cases */
494 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
497 int dr = alloc_preg (cfg);
498 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
499 (ins)->sreg2 = widen->dreg;
/*
 * ADD_BINOP/ADD_UNOP pop operand(s) from the eval stack 'sp', select the
 * type-specific opcode via type_from_op (), allocate a dreg and push the
 * (possibly decomposed) result.  ADD_BINCOND additionally emits the
 * compare and wires up the true/false branch target basic blocks.
 */
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
524 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
529 #define ADD_BINCOND(next_block) do { \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, bblock, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, bblock, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, bblock, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (bblock, cmp); \
558 MONO_ADD_INS (bblock, ins); \
562 * link_bblock: Links two basic blocks
564 * links two basic blocks in the control flow graph, the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * the control flow ends to after 'from'.
569 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
571 MonoBasicBlock **newa;
575 if (from->cil_code) {
577 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
579 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
582 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
584 printf ("edge from entry to exit\n");
/* bail out early if the edge already exists */
589 for (i = 0; i < from->out_count; ++i) {
590 if (to == from->out_bb [i]) {
/* grow 'from's out-edge array by one (mempool-allocated) */
596 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
597 for (i = 0; i < from->out_count; ++i) {
598 newa [i] = from->out_bb [i];
/* mirror the edge in 'to's in-edge list */
606 for (i = 0; i < to->in_count; ++i) {
607 if (from == to->in_bb [i]) {
613 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
614 for (i = 0; i < to->in_count; ++i) {
615 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
624 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
626 link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
644 mono_find_block_region (MonoCompile *cfg, int offset)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
/* first pass: filter/handler regions take precedence over try regions */
650 for (i = 0; i < header->num_clauses; ++i) {
651 clause = &header->clauses [i];
652 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
653 (offset < (clause->handler_offset)))
654 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
656 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
657 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
658 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
659 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
660 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
662 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* second pass: try regions */
665 for (i = 0; i < header->num_clauses; ++i) {
666 clause = &header->clauses [i];
668 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
669 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (into a GList) the clauses of the given type that contain 'ip'
 *   but not 'target' -- i.e. the handlers a branch from ip to target
 *   would leave.
 */
676 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
678 MonoMethodHeader *header = cfg->header;
679 MonoExceptionClause *clause;
683 for (i = 0; i < header->num_clauses; ++i) {
684 clause = &header->clauses [i];
685 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
686 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
687 if (clause->flags == type)
688 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer save variable for an EH region;
 * marked volatile to keep it out of the register allocator. */
695 mono_create_spvar_for_region (MonoCompile *cfg, int region)
699 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
703 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
704 /* prevent it from being register allocated */
705 var->flags |= MONO_INST_VOLATILE;
707 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for an IL offset, if any. */
711 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
713 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for an IL offset. */
717 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
721 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
725 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
726 /* prevent it from being register allocated */
727 var->flags |= MONO_INST_VOLATILE;
729 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
735 * Returns the type used in the eval stack when @type is loaded.
736 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
739 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
743 type = mini_get_underlying_type (cfg, type);
744 inst->klass = klass = mono_class_from_mono_type (type);
746 inst->type = STACK_MP;
/* NOTE(review): several case labels for the basic integer types are
 * missing from this excerpt. */
751 switch (type->type) {
753 inst->type = STACK_INV;
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
/* enums decay to their underlying base type */
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
804 g_assert (cfg->generic_sharing_context);
805 if (mini_is_gsharedvt_type (cfg, type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
/* NB: the R8 and R4 rows carry a 9th (STACK_R4) column; shorter rows rely
 * on the implicit zero initializer (assumes STACK_INV == 0 -- confirm). */
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* nonzero entries mark comparable stack-type pairs; see the users of
 * bin_comp_table in type_from_op () for the meaning of values > 1 */
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* The *_op_map tables are added to a base CIL opcode to obtain the
 * type-specialized IR opcode. */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
/* compare-with-immediate: src1 indexes both table dimensions, presumably
 * because the immediate shares src1's stack type -- confirm */
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers get a default klass */
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the eval-stack values in 'args' are compatible with the
 * parameter types of 'sig'.  Note: the parameter named 'this' is legal in
 * C (this is not C++). */
1167 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1171 switch (args->type) {
1181 for (i = 0; i < sig->param_count; ++i) {
1182 switch (args [i].type) {
1186 if (!sig->params [i]->byref)
1190 if (sig->params [i]->byref)
1192 switch (sig->params [i]->type) {
1193 case MONO_TYPE_CLASS:
1194 case MONO_TYPE_STRING:
1195 case MONO_TYPE_OBJECT:
1196 case MONO_TYPE_SZARRAY:
1197 case MONO_TYPE_ARRAY:
1204 if (sig->params [i]->byref)
1206 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1215 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 * When we need a pointer to the current domain many times in a method, we
1224 * call mono_domain_get() once and we store the result in a local variable.
1225 * This function returns the variable that represents the MonoDomain*.
1227 inline static MonoInst *
1228 mono_get_domainvar (MonoCompile *cfg)
1230 if (!cfg->domainvar)
1231 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1232 return cfg->domainvar;
1236 * The got_var contains the address of the Global Offset Table when AOT
1240 mono_get_got_var (MonoCompile *cfg)
1242 #ifdef MONO_ARCH_NEED_GOT_VAR
1243 if (!cfg->compile_aot)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context / vtable;
 * only valid when generic sharing is in effect (asserted below). */
1255 mono_get_vtable_var (MonoCompile *cfg)
1257 g_assert (cfg->generic_sharing_context);
1259 if (!cfg->rgctx_var) {
1260 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 /* force the var to be stack allocated */
1262 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1265 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType. */
1269 type_from_stack_type (MonoInst *ins) {
1270 switch (ins->type) {
1271 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1272 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1273 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1274 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1275 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1277 return &ins->klass->this_arg;
1278 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1279 case STACK_VTYPE: return &ins->klass->byval_arg;
1281 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type (): map a MonoType to the STACK_* constant
 * used on the JIT evaluation stack, after stripping enums via
 * mono_type_get_underlying_type ().  (Most of the case labels and returns
 * are not visible in this chunk.)
 */
1286 static G_GNUC_UNUSED int
1287 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1289 t = mono_type_get_underlying_type (t);
1301 case MONO_TYPE_FNPTR:
1303 case MONO_TYPE_CLASS:
1304 case MONO_TYPE_STRING:
1305 case MONO_TYPE_OBJECT:
1306 case MONO_TYPE_SZARRAY:
1307 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type since it depends on whether the backend keeps float32 values */
1313 return cfg->r4_stack_type;
1316 case MONO_TYPE_VALUETYPE:
1317 case MONO_TYPE_TYPEDBYREF:
1319 case MONO_TYPE_GENERICINST:
1320 if (mono_type_generic_inst_is_valuetype (t))
1326 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element MonoClass accessed by the given CIL ldelem.*/stelem.*
 * opcode (the case labels between the returns are not visible here).
 * Aborts on opcodes that are not array accesses.
 */
1333 array_access_to_klass (int opcode)
1337 return mono_defaults.byte_class;
1339 return mono_defaults.uint16_class;
1342 return mono_defaults.int_class;
1345 return mono_defaults.sbyte_class;
1348 return mono_defaults.int16_class;
1351 return mono_defaults.int32_class;
1353 return mono_defaults.uint32_class;
1356 return mono_defaults.int64_class;
1359 return mono_defaults.single_class;
1362 return mono_defaults.double_class;
1363 case CEE_LDELEM_REF:
1364 case CEE_STELEM_REF:
1365 return mono_defaults.object_class;
1367 g_assert_not_reached ();
1373 * We try to share variables when possible
 * mono_compile_get_interface_var:
 * Return a local variable to hold the stack value SP[SLOT] at a basic-block
 * boundary.  Variables are cached per (stack slot, stack type) in
 * cfg->intvars so the same temp is reused for the same slot/type pair;
 * slots deeper than the method's declared max_stack (possible with inlining)
 * always get a fresh variable.
 */
1376 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1381 /* inlining can result in deeper stacks */
1382 if (slot >= cfg->header->max_stack)
1383 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* one cache entry per (type, slot) pair */
1385 pos = ins->type - 1 + slot * STACK_MAX;
1387 switch (ins->type) {
1394 if ((vnum = cfg->intvars [pos]))
1395 return cfg->varinfo [vnum];
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1397 cfg->intvars [pos] = res->inst_c0;
1400 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT compiling, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash, so the AOT compiler can later look the item up by
 * metadata token.
 */
1406 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1409 * Don't use this if a generic_context is set, since that means AOT can't
1410 * look up the method using just the image+token.
1411 * table == 0 means this is a reference made from a wrapper.
1413 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1414 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1415 jump_info_token->image = image;
1416 jump_info_token->token = token;
1417 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1422 * This function is called to handle items that are left on the evaluation stack
1423 * at basic block boundaries. What happens is that we save the values to local variables
1424 * and we reload them later when first entering the target basic block (with the
1425 * handle_loaded_temps () function).
1426 * A single joint point will use the same variables (stored in the array bb->out_stack or
1427 * bb->in_stack, if the basic block is before or after the joint point).
1429 * This function needs to be called _before_ emitting the last instruction of
1430 * the bb (i.e. before emitting a branch).
1431 * If the stack merge fails at a join point, cfg->unverifiable is set.
1434 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1437 MonoBasicBlock *bb = cfg->cbb;
1438 MonoBasicBlock *outb;
1439 MonoInst *inst, **locals;
1444 if (cfg->verbose_level > 3)
1445 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: set up out_scount/out_stack, reusing a successor's in_stack if one already exists */
1446 if (!bb->out_scount) {
1447 bb->out_scount = count;
1448 //printf ("bblock %d has out:", bb->block_num);
1450 for (i = 0; i < bb->out_count; ++i) {
1451 outb = bb->out_bb [i];
1452 /* exception handlers are linked, but they should not be considered for stack args */
1453 if (outb->flags & BB_EXCEPTION_HANDLER)
1455 //printf (" %d", outb->block_num);
1456 if (outb->in_stack) {
1458 bb->out_stack = outb->in_stack;
1464 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1465 for (i = 0; i < count; ++i) {
1467 * try to reuse temps already allocated for this purpouse, if they occupy the same
1468 * stack slot and if they are of the same type.
1469 * This won't cause conflicts since if 'local' is used to
1470 * store one of the values in the in_stack of a bblock, then
1471 * the same variable will be used for the same outgoing stack
1473 * This doesn't work when inlining methods, since the bblocks
1474 * in the inlined methods do not inherit their in_stack from
1475 * the bblock they are inlined to. See bug #58863 for an
1478 if (cfg->inlined_method)
1479 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1481 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every successor, flagging mismatched stack depths as unverifiable */
1486 for (i = 0; i < bb->out_count; ++i) {
1487 outb = bb->out_bb [i];
1488 /* exception handlers are linked, but they should not be considered for stack args */
1489 if (outb->flags & BB_EXCEPTION_HANDLER)
1491 if (outb->in_scount) {
1492 if (outb->in_scount != bb->out_scount) {
1493 cfg->unverifiable = TRUE;
1496 continue; /* check they are the same locals */
1498 outb->in_scount = count;
1499 outb->in_stack = bb->out_stack;
1502 locals = bb->out_stack;
/* Spill the current stack values into the shared temps and replace sp[] with the temps */
1504 for (i = 0; i < count; ++i) {
1505 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1506 inst->cil_code = sp [i]->cil_code;
1507 sp [i] = locals [i];
1508 if (cfg->verbose_level > 3)
1509 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1513 * It is possible that the out bblocks already have in_stack assigned, and
1514 * the in_stacks differ. In this case, we will store to all the different
1521 /* Find a bblock which has a different in_stack */
1523 while (bindex < bb->out_count) {
1524 outb = bb->out_bb [bindex];
1525 /* exception handlers are linked, but they should not be considered for stack args */
1526 if (outb->flags & BB_EXCEPTION_HANDLER) {
1530 if (outb->in_stack != locals) {
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1538 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR which loads the interface bitmap stored at [base_reg + offset]
 * and sets INTF_BIT_REG to a nonzero value iff the bit for
 * klass->interface_id is set.  With COMPRESSED_INTERFACE_BITMAP the test is
 * done by the mono_class_interface_match icall; otherwise the relevant byte
 * is loaded and masked inline.  When AOT compiling, the interface id is
 * only known at runtime (MONO_PATCH_INFO_IID), so the byte index and bit
 * mask are computed with emitted arithmetic instead of constants.
 */
1548 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1550 int ibitmap_reg = alloc_preg (cfg);
1551 #ifdef COMPRESSED_INTERFACE_BITMAP
1553 MonoInst *res, *ins;
1554 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1555 MONO_ADD_INS (cfg->cbb, ins);
1557 if (cfg->compile_aot)
1558 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1560 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1561 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1562 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1564 int ibitmap_byte_reg = alloc_preg (cfg);
1566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1568 if (cfg->compile_aot) {
1569 int iid_reg = alloc_preg (cfg);
1570 int shifted_iid_reg = alloc_preg (cfg);
1571 int ibitmap_byte_address_reg = alloc_preg (cfg);
1572 int masked_iid_reg = alloc_preg (cfg);
1573 int iid_one_bit_reg = alloc_preg (cfg);
1574 int iid_bit_reg = alloc_preg (cfg);
1575 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1580 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1581 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1582 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time constants */
1584 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1591 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1592 * stored in "klass_reg" implements the interface "klass".
 * Thin wrapper: points the bitmap check at MonoClass.interface_bitmap.
1595 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1597 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1601 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1602 * stored in "vtable_reg" implements the interface "klass".
 * Thin wrapper: points the bitmap check at MonoVTable.interface_bitmap.
1605 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1607 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1611 * Emit code which checks whether the interface id of @klass is in range,
1612 * i.e. not greater than the value given by max_iid_reg; on failure either
 * branch to false_target (when non-NULL) or throw InvalidCastException.
1615 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1616 MonoBasicBlock *false_target)
1618 if (cfg->compile_aot) {
/* AOT: the interface id is only known at runtime, load it via a patch */
1619 int iid_reg = alloc_preg (cfg);
1620 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1621 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1626 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1628 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1631 /* Same as above, but obtains max_iid from a vtable */
1633 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1634 MonoBasicBlock *false_target)
1636 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1638 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1639 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1642 /* Same as above, but obtains max_iid from a klass */
1644 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1645 MonoBasicBlock *false_target)
1647 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1649 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1650 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test: check whether the class in KLASS_REG
 * is a subclass of KLASS by comparing KLASS (or KLASS_INS / an AOT class
 * constant) against the entry at depth klass->idepth-1 of the candidate's
 * supertypes table.  Branches to TRUE_TARGET on match; classes with idepth
 * beyond MONO_DEFAULT_SUPERTABLE_SIZE first get a runtime idepth range check
 * that branches to FALSE_TARGET.
 */
1654 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1656 int idepth_reg = alloc_preg (cfg);
1657 int stypes_reg = alloc_preg (cfg);
1658 int stype = alloc_preg (cfg);
/* make sure klass->supertypes/idepth are initialized before we read them */
1660 mono_class_setup_supertypes (klass);
1662 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1671 } else if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for mini_emit_isninst_cast_inst () without a klass instruction. */
1682 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1684 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit a check that the MonoVTable in VTABLE_REG implements the interface
 * KLASS: range-check the interface id, test the interface bitmap, then
 * branch to TRUE_TARGET on success or (with no targets) throw
 * InvalidCastException on failure.
 */
1688 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1690 int intf_reg = alloc_preg (cfg);
1692 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1693 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1698 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1702 * Variant of the above that takes a register to the class, not the vtable.
1705 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_bit_reg = alloc_preg (cfg);
1709 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
/* nonzero bit => interface implemented */
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class check: compare KLASS_REG against KLASS_INST (if given),
 * an AOT class constant, or the immediate klass pointer, and throw
 * InvalidCastException on mismatch.
 */
1719 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1723 } else if (cfg->compile_aot) {
1724 int const_reg = alloc_preg (cfg);
1725 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1726 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1730 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper for mini_emit_class_check_inst () without a klass instruction. */
1734 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1736 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS (via AOT constant when AOT compiling) and
 * emit BRANCH_OP to TARGET based on the comparison result.
 */
1740 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1742 if (cfg->compile_aot) {
1743 int const_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1745 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst () below recurses into this for array element types. */
1753 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure.  Arrays are handled by checking rank,
 * then the element (cast_class), with special cases for enum element types
 * and a vector (no-bounds) check for SZARRAY; non-array classes are checked
 * through the supertypes table.  OBJ_REG may be -1 to skip the object-level
 * vector check (used when recursing for arrays of arrays).
 */
1756 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1759 int rank_reg = alloc_preg (cfg);
1760 int eclass_reg = alloc_preg (cfg);
1762 g_assert (!klass_inst);
/* array case: ranks must match exactly */
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1765 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1766 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1768 if (klass->cast_class == mono_defaults.object_class) {
1769 int parent_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1771 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1772 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1773 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1774 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1775 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1776 } else if (klass->cast_class == mono_defaults.enum_class) {
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1779 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1781 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1782 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1785 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1786 /* Check that the object is a vector too */
1787 int bounds_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1790 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: supertypes-table subclass check */
1793 int idepth_reg = alloc_preg (cfg);
1794 int stypes_reg = alloc_preg (cfg);
1795 int stype = alloc_preg (cfg);
1797 mono_class_setup_supertypes (klass);
1799 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1802 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1806 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper for mini_emit_castclass_inst () without a klass instruction. */
1811 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1813 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR which sets SIZE bytes at [destreg + offset] to VAL (currently only
 * VAL == 0 is supported, see the assert).  Small aligned regions use
 * immediate stores; larger ones load VAL into a register and store it in the
 * widest chunks the alignment (and NO_UNALIGNED_ACCESS) allow, finishing
 * with 2- and 1-byte stores.
 */
1817 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1821 g_assert (val == 0);
1826 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1829 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1832 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1837 #if SIZEOF_REGISTER == 8
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1845 val_reg = alloc_preg (cfg);
1847 if (SIZEOF_REGISTER == 8)
1848 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1850 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1853 /* This could be optimized further if neccesary */
1855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1862 #if !NO_UNALIGNED_ACCESS
1863 if (SIZEOF_REGISTER == 8) {
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR which copies SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], using the widest load/store pairs the alignment
 * (and NO_UNALIGNED_ACCESS) allow, then 4-, 2- and 1-byte tails.
 * Size is asserted < 10000 to bound code expansion; regions are assumed
 * not to overlap (loads/stores are emitted in increasing-address order).
 */
1895 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1902 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1903 g_assert (size < 10000);
/* unaligned source/destination: fall back to byte copies */
1906 /* This could be optimized further if neccesary */
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1917 #if !NO_UNALIGNED_ACCESS
1918 if (SIZEOF_REGISTER == 8) {
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1939 cur_reg = alloc_preg (cfg);
1940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1947 cur_reg = alloc_preg (cfg);
1948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1949 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR which stores SREG1 into the TLS slot identified by TLS_KEY.
 * When AOT compiling the TLS offset is only known at runtime, so it is
 * loaded via a patch constant and OP_TLS_SET_REG is used; the JIT path
 * embeds the offset directly in an OP_TLS_SET instruction.
 */
1957 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1961 if (cfg->compile_aot) {
1962 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1963 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1965 ins->sreg2 = c->dreg;
1966 MONO_ADD_INS (cfg->cbb, ins);
1968 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1970 ins->inst_offset = mini_get_tls_offset (tls_key);
1971 MONO_ADD_INS (cfg->cbb, ins);
1978 * Emit IR to push the current LMF onto the LMF stack.
1981 emit_push_lmf (MonoCompile *cfg)
1984 * Emit IR to push the LMF:
1985 * lmf_addr = <lmf_addr from tls>
1986 * lmf->lmf_addr = lmf_addr
1987 * lmf->prev_lmf = *lmf_addr
1990 int lmf_reg, prev_lmf_reg;
1991 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so just chain and store it there */
1996 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1997 /* Load current lmf */
1998 lmf_ins = mono_get_lmf_intrinsic (cfg);
2000 MONO_ADD_INS (cfg->cbb, lmf_ins);
2001 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2002 lmf_reg = ins->dreg;
2003 /* Save previous_lmf */
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2006 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2009 * Store lmf_addr in a variable, so it can be allocated to a global register.
2011 if (!cfg->lmf_addr_var)
2012 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr: either from jit_tls, an intrinsic, an inlined pthread_getspecific, or the mono_get_lmf_addr icall */
2015 ins = mono_get_jit_tls_intrinsic (cfg);
2017 int jit_tls_dreg = ins->dreg;
2019 MONO_ADD_INS (cfg->cbb, ins);
2020 lmf_reg = alloc_preg (cfg);
2021 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2023 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2028 MONO_ADD_INS (cfg->cbb, lmf_ins);
2031 MonoInst *args [16], *jit_tls_ins, *ins;
2033 /* Inline mono_get_lmf_addr () */
2034 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2036 /* Load mono_jit_tls_id */
2037 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2038 /* call pthread_getspecific () */
2039 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2040 /* lmf_addr = &jit_tls->lmf */
2041 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2048 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2050 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2051 lmf_reg = ins->dreg;
2053 prev_lmf_reg = alloc_preg (cfg);
2054 /* Save previous_lmf */
2055 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2056 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf */
2058 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2065 * Emit IR to pop the current LMF from the LMF stack.
2068 emit_pop_lmf (MonoCompile *cfg)
2070 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2076 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2077 lmf_reg = ins->dreg;
/* Fast path: the LMF lives in TLS, restore previous_lmf into the TLS slot */
2079 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2080 /* Load previous_lmf */
2081 prev_lmf_reg = alloc_preg (cfg);
2082 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2084 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2087 * Emit IR to pop the LMF:
2088 * *(lmf->lmf_addr) = lmf->prev_lmf
2090 /* This could be called before emit_push_lmf () */
2091 if (!cfg->lmf_addr_var)
2092 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2093 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2095 prev_lmf_reg = alloc_preg (cfg);
2096 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a call to the profiler enter/leave helper FUNC, passing the current
 * method, when enter/leave profiling is enabled.  Skipped while inlining
 * (cfg->method != cfg->current_method) to avoid distorting profiles.
 */
2102 emit_instrumentation_call (MonoCompile *cfg, void *func)
2104 MonoInst *iargs [1];
2107 * Avoid instrumenting inlined methods since it can
2108 * distort profiling results.
2110 if (cfg->method != cfg->current_method)
2113 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2114 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2115 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call IR opcode for a call returning TYPE: the base opcode
 * family is determined by the return type (VOIDCALL/CALL/LCALL/RCALL/FCALL/
 * VCALL), and the REG (indirect, CALLI) or MEMBASE (virtual) variant is
 * picked from CALLI/VIRT.  Enums decay to their base type; generic
 * instances to their container class.  Aborts on unknown types.
 */
2120 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2123 type = mini_get_underlying_type (cfg, type);
2124 switch (type->type) {
2125 case MONO_TYPE_VOID:
2126 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2133 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2137 case MONO_TYPE_FNPTR:
2138 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2139 case MONO_TYPE_CLASS:
2140 case MONO_TYPE_STRING:
2141 case MONO_TYPE_OBJECT:
2142 case MONO_TYPE_SZARRAY:
2143 case MONO_TYPE_ARRAY:
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2147 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2150 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2152 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2154 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2155 case MONO_TYPE_VALUETYPE:
/* enums decay to their underlying integral type and are re-dispatched */
2156 if (type->data.klass->enumtype) {
2157 type = mono_class_enum_basetype (type->data.klass);
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2161 case MONO_TYPE_TYPEDBYREF:
2162 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2163 case MONO_TYPE_GENERICINST:
2164 type = &type->data.generic_class->container_class->byval_arg;
2167 case MONO_TYPE_MVAR:
/* gsharedvt type variables are returned like value types */
2169 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2171 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2177 * target_type_is_incompatible:
2178 * @cfg: MonoCompile context
2180 * Check that the item @arg on the evaluation stack can be stored
2181 * in the target type (can be a local, or field, etc).
2182 * The cfg arg can be used to check if we need verification or just
2185 * Returns: non-0 value if arg can't be stored on a target.
2188 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2190 MonoType *simple_type;
2193 if (target->byref) {
2194 /* FIXME: check that the pointed to types match */
2195 if (arg->type == STACK_MP)
2196 return arg->klass != mono_class_from_mono_type (target);
2197 if (arg->type == STACK_PTR)
2202 simple_type = mini_get_underlying_type (cfg, target);
2203 switch (simple_type->type) {
2204 case MONO_TYPE_VOID:
2212 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2216 /* STACK_MP is needed when setting pinned locals */
2217 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2222 case MONO_TYPE_FNPTR:
2224 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2225 * in native int. (#688008).
2227 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2230 case MONO_TYPE_CLASS:
2231 case MONO_TYPE_STRING:
2232 case MONO_TYPE_OBJECT:
2233 case MONO_TYPE_SZARRAY:
2234 case MONO_TYPE_ARRAY:
2235 if (arg->type != STACK_OBJ)
2237 /* FIXME: check type compatibility */
2241 if (arg->type != STACK_I8)
2245 if (arg->type != cfg->r4_stack_type)
2249 if (arg->type != STACK_R8)
/* value types: the stack value must be a vtype of the exact same class */
2252 case MONO_TYPE_VALUETYPE:
2253 if (arg->type != STACK_VTYPE)
2255 klass = mono_class_from_mono_type (simple_type);
2256 if (klass != arg->klass)
2259 case MONO_TYPE_TYPEDBYREF:
2260 if (arg->type != STACK_VTYPE)
2262 klass = mono_class_from_mono_type (simple_type);
2263 if (klass != arg->klass)
2266 case MONO_TYPE_GENERICINST:
2267 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2268 if (arg->type != STACK_VTYPE)
2270 klass = mono_class_from_mono_type (simple_type);
2271 /* The second cases is needed when doing partial sharing */
2272 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2276 if (arg->type != STACK_OBJ)
2278 /* FIXME: check type compatibility */
2282 case MONO_TYPE_MVAR:
/* type variables only appear here under generic sharing */
2283 g_assert (cfg->generic_sharing_context);
2284 if (mini_type_var_is_vt (cfg, simple_type)) {
2285 if (arg->type != STACK_VTYPE)
2288 if (arg->type != STACK_OBJ)
2293 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2299 * Prepare arguments for passing to a function call.
2300 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
2302 * The type checks are not yet complete and some conversions may need
2303 * casts on 32 or 64 bit architectures.
2305 * FIXME: implement this using target_type_is_incompatible ()
2308 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2310 MonoType *simple_type;
/* the implicit 'this' argument must be an object, managed pointer or native pointer */
2314 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2318 for (i = 0; i < sig->param_count; ++i) {
2319 if (sig->params [i]->byref) {
2320 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2324 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2326 switch (simple_type->type) {
2327 case MONO_TYPE_VOID:
2336 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2342 case MONO_TYPE_FNPTR:
2343 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2346 case MONO_TYPE_CLASS:
2347 case MONO_TYPE_STRING:
2348 case MONO_TYPE_OBJECT:
2349 case MONO_TYPE_SZARRAY:
2350 case MONO_TYPE_ARRAY:
2351 if (args [i]->type != STACK_OBJ)
2356 if (args [i]->type != STACK_I8)
2360 if (args [i]->type != cfg->r4_stack_type)
2364 if (args [i]->type != STACK_R8)
2367 case MONO_TYPE_VALUETYPE:
/* enums decay to their underlying integral type and are re-dispatched */
2368 if (simple_type->data.klass->enumtype) {
2369 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2372 if (args [i]->type != STACK_VTYPE)
2375 case MONO_TYPE_TYPEDBYREF:
2376 if (args [i]->type != STACK_VTYPE)
2379 case MONO_TYPE_GENERICINST:
2380 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2383 case MONO_TYPE_MVAR:
2385 if (args [i]->type != STACK_VTYPE)
2389 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an indirect/virtual *CALL_MEMBASE opcode to its direct-call
 * counterpart (the return statements between the case labels are not
 * visible here).  Aborts on non-MEMBASE call opcodes.
 */
2397 callvirt_to_call (int opcode)
2400 case OP_CALL_MEMBASE:
2402 case OP_VOIDCALL_MEMBASE:
2404 case OP_FCALL_MEMBASE:
2406 case OP_RCALL_MEMBASE:
2408 case OP_VCALL_MEMBASE:
2410 case OP_LCALL_MEMBASE:
2413 g_assert_not_reached ();
2419 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit IR which materializes the IMT argument (either the supplied IMT_ARG
 * value or a method constant for METHOD) into a register and attaches it to
 * CALL.  With LLVM the register is recorded in call->imt_arg_reg; otherwise
 * it is passed in the architecture's MONO_ARCH_IMT_REG.  When AOT compiling
 * the method pointer comes from a MONO_PATCH_INFO_METHODCONST patch.
 */
2421 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2425 if (COMPILE_LLVM (cfg)) {
2426 method_reg = alloc_preg (cfg);
2429 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2430 } else if (cfg->compile_aot) {
2431 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2434 MONO_INST_NEW (cfg, ins, OP_PCONST);
2435 ins->inst_p0 = method;
2436 ins->dreg = method_reg;
2437 MONO_ADD_INS (cfg->cbb, ins);
2441 call->imt_arg_reg = method_reg;
2443 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same materialization, passed via MONO_ARCH_IMT_REG */
2447 method_reg = alloc_preg (cfg);
2450 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2451 } else if (cfg->compile_aot) {
2452 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2455 MONO_INST_NEW (cfg, ins, OP_PCONST);
2456 ins->inst_p0 = method;
2457 ins->dreg = method_reg;
2458 MONO_ADD_INS (cfg->cbb, ins);
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from mempool MP and fill in its target.
 * NOTE(review): line-sampled extract; the ip/type field assignments and
 * the return statement are not visible here.
 */
2464 static MonoJumpInfo *
2465 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2467 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2471 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Return the generic-context usage flags for KLASS, but only when this
 * compilation is using generic sharing; presumably 0 otherwise (the else
 * branch is not visible in this extract — confirm against full source).
 */
2477 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2479 if (cfg->generic_sharing_context)
2480 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Method counterpart of mini_class_check_context_used (): context-usage
 * flags for METHOD under generic sharing, else-branch not visible here.
 */
2486 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2488 if (cfg->generic_sharing_context)
2489 return mono_method_check_context_used (method);
2495 * check_method_sharing:
2497 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (both optional): *OUT_PASS_VTABLE — the callee needs its class
 * vtable as a hidden argument; *OUT_PASS_MRGCTX — the callee needs a
 * method runtime generic context.  The two are mutually exclusive (see the
 * g_assert below).
 */
2500 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2502 gboolean pass_vtable = FALSE;
2503 gboolean pass_mrgctx = FALSE;
/* vtable case: static or valuetype method on a generic class. */
2505 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2506 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2507 gboolean sharable = FALSE;
2509 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2512 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2513 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2514 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2516 sharable = sharing_enabled && context_sharable;
2520 * Pass vtable iff target method might
2521 * be shared, which means that sharing
2522 * is enabled for its class and its
2523 * context is sharable (and it's not a
2526 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx case: method is itself generic (has a method_inst). */
2530 if (mini_method_get_context (cmethod) &&
2531 mini_method_get_context (cmethod)->method_inst) {
2532 g_assert (!pass_vtable);
2534 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2537 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2538 MonoGenericContext *context = mini_method_get_context (cmethod);
2539 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2541 if (sharing_enabled && context_sharable)
/* gsharedvt signatures also force the mrgctx path. */
2543 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2548 if (out_pass_vtable)
2549 *out_pass_vtable = pass_vtable;
2550 if (out_pass_mrgctx)
2551 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Core helper that builds a MonoCallInst for SIG/ARGS and lets the backend
 * (or LLVM) lower the argument passing.  CALLI/VIRTUAL/TAIL/RGCTX/
 * UNBOX_TRAMPOLINE select the call flavour.  Returns the new call
 * instruction; the caller is responsible for adding it to a basic block.
 */
2554 inline static MonoCallInst *
2555 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2556 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2560 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls: emit the profiler leave event before control leaves this method. */
2565 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2567 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2569 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2572 call->signature = sig;
2573 call->rgctx_reg = rgctx;
2574 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2576 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns: route the result through vret_addr / a temp var. */
2579 if (mini_type_is_vtype (cfg, sig_ret)) {
2580 call->vret_var = cfg->vret_addr;
2581 //g_assert_not_reached ();
2583 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2584 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2587 temp->backend.is_pinvoke = sig->pinvoke;
2590 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2591 * address of return value to increase optimization opportunities.
2592 * Before vtype decomposition, the dreg of the call ins itself represents the
2593 * fact the call modifies the return value. After decomposition, the call will
2594 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2595 * will be transformed into an LDADDR.
2597 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2598 loada->dreg = alloc_preg (cfg);
2599 loada->inst_p0 = temp;
2600 /* We reference the call too since call->dreg could change during optimization */
2601 loada->inst_p1 = call;
2602 MONO_ADD_INS (cfg->cbb, loada);
2604 call->inst.dreg = temp->dreg;
2606 call->vret_var = loada;
2607 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2608 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2610 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2611 if (COMPILE_SOFT_FLOAT (cfg)) {
2613 * If the call has a float argument, we would need to do an r8->r4 conversion using
2614 * an icall, but that cannot be done during the call sequence since it would clobber
2615 * the call registers + the stack. So we do it before emitting the call.
2617 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2619 MonoInst *in = call->args [i];
2621 if (i >= sig->hasthis)
2622 t = sig->params [i - sig->hasthis];
2624 t = &mono_defaults.int_class->byval_arg;
2625 t = mono_type_get_underlying_type (t);
2627 if (!t->byref && t->type == MONO_TYPE_R4) {
2628 MonoInst *iargs [1];
2632 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2634 /* The result will be in an int vreg */
2635 call->args [i] = conv;
2641 call->need_unbox_trampoline = unbox_trampoline;
/* Let the selected backend lower the out-args for this call. */
2644 if (COMPILE_LLVM (cfg))
2645 mono_llvm_emit_call (cfg, call);
2647 mono_arch_emit_call (cfg, call);
2649 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area and flag that this method calls out. */
2652 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2653 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx value in RGCTX_REG to CALL.  When the architecture has a
 * dedicated RGCTX register the value is passed there; the rgctx_arg_reg
 * field is recorded for the other configuration (#else branch not fully
 * visible in this extract).
 */
2659 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2661 #ifdef MONO_ARCH_RGCTX_REG
2662 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2663 cfg->uses_rgctx_reg = TRUE;
2664 call->rgctx_reg = TRUE;
2666 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG.  Optionally passes
 * an IMT argument and an rgctx argument.  For pinvoke wrappers with calling-
 * convention checking enabled, brackets the call with OP_GET_SP/OP_SET_SP to
 * detect and repair stack imbalance, throwing ExecutionEngineException when
 * the stack pointer changed across the call.
 */
2673 inline static MonoInst*
2674 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2679 gboolean check_sp = FALSE;
2681 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2682 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2684 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg before emitting the call. */
2689 rgctx_reg = mono_alloc_preg (cfg);
2690 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2694 if (!cfg->stack_inbalance_var)
2695 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call for the imbalance check below. */
2697 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2698 ins->dreg = cfg->stack_inbalance_var->dreg;
2699 MONO_ADD_INS (cfg->cbb, ins);
2702 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2704 call->inst.sreg1 = addr->dreg;
2707 emit_imt_argument (cfg, call, NULL, imt_arg);
2709 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2714 sp_reg = mono_alloc_preg (cfg);
2716 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2718 MONO_ADD_INS (cfg->cbb, ins);
2720 /* Restore the stack so we don't crash when throwing the exception */
2721 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2722 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2723 MONO_ADD_INS (cfg->cbb, ins);
2725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2730 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2732 return (MonoInst*)call;
2736 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2739 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2741 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual, possibly tail) call to METHOD with signature
 * SIG.  THIS selects virtual dispatch; IMT_ARG/RGCTX_ARG are forwarded to
 * the generated call.  Handles remoting proxies, string ctor signature
 * rewriting, delegate Invoke fast paths, devirtualization of final/
 * non-virtual methods, and vtable/IMT-slot based dispatch.
 */
2744 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2745 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2747 #ifndef DISABLE_REMOTING
2748 gboolean might_be_remote = FALSE;
2750 gboolean virtual = this != NULL;
2751 gboolean enable_for_aot = TRUE;
2755 gboolean need_unbox_trampoline;
2758 sig = mono_method_signature (method);
/* Preserve the rgctx value in its own vreg before emitting anything else. */
2761 rgctx_reg = mono_alloc_preg (cfg);
2762 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2765 if (method->string_ctor) {
2766 /* Create the real signature */
2767 /* FIXME: Cache these */
2768 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2769 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2774 context_used = mini_method_check_context_used (cfg, method);
2776 #ifndef DISABLE_REMOTING
/* Remoting: call may need the invoke-with-check wrapper. */
2777 might_be_remote = this && sig->hasthis &&
2778 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2779 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2781 if (might_be_remote && context_used) {
2784 g_assert (cfg->generic_sharing_context);
2786 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2788 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Methods on object/interfaces may receive a boxed receiver. */
2792 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2794 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2796 #ifndef DISABLE_REMOTING
2797 if (might_be_remote)
2798 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2801 call->method = method;
2802 call->inst.flags |= MONO_INST_HAS_METHOD;
2803 call->inst.inst_left = this;
2804 call->tail_call = tail;
2807 int vtable_reg, slot_reg, this_reg;
2810 this_reg = this->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl. */
2812 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2813 MonoInst *dummy_use;
2815 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2817 /* Make a call to delegate->invoke_impl */
2818 call->inst.inst_basereg = this_reg;
2819 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2820 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2822 /* We must emit a dummy use here because the delegate trampoline will
2823 replace the 'this' argument with the delegate target making this activation
2824 no longer a root for the delegate.
2825 This is an issue for delegates that target collectible code such as dynamic
2826 methods of GC'able assemblies.
2828 For a test case look into #667921.
2830 FIXME: a dummy use is not the best way to do it as the local register allocator
2831 will put it on a caller save register and spil it around the call.
2832 Ideally, we would either put it on a callee save register or only do the store part.
2834 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2836 return (MonoInst*)call;
/* Devirtualize when the method is non-virtual or final. */
2839 if ((!cfg->compile_aot || enable_for_aot) &&
2840 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2841 (MONO_METHOD_IS_FINAL (method) &&
2842 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2843 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2845 * the method is not virtual, we just need to ensure this is not null
2846 * and then we can call the method directly.
2848 #ifndef DISABLE_REMOTING
2849 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2851 * The check above ensures method is not gshared, this is needed since
2852 * gshared methods can't have wrappers.
2854 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2858 if (!method->string_ctor)
2859 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2861 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2862 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2864 * the method is virtual, but we can statically dispatch since either
2865 * it's class or the method itself are sealed.
2866 * But first we need to ensure it's not a null reference.
2868 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2870 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, pick an IMT or vtable slot. */
2872 vtable_reg = alloc_preg (cfg);
2873 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2874 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2875 guint32 imt_slot = mono_method_get_imt_slot (method);
2876 emit_imt_argument (cfg, call, call->method, imt_arg);
2877 slot_reg = vtable_reg;
/* IMT table sits at negative offsets from the vtable pointer. */
2878 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2880 slot_reg = vtable_reg;
2881 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2882 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2884 g_assert (mono_method_signature (method)->generic_param_count);
2885 emit_imt_argument (cfg, call, call->method, imt_arg);
2889 call->inst.sreg1 = slot_reg;
2890 call->inst.inst_offset = offset;
2891 call->virtual = TRUE;
2895 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2898 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2900 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: non-tail call to METHOD with its own signature and
 * no IMT/rgctx arguments.
 */
2904 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2906 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): line-sampled extract; the lines storing FUNC on the call
 * are not visible here.
 */
2910 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2917 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2920 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2922 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for FUNC, going through its
 * wrapper so exceptions can be raised safely.
 */
2926 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2928 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2932 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2936 * mono_emit_abs_call:
2938 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * A MonoJumpInfo is allocated and registered in cfg->abs_patches so the
 * PATCH_INFO_ABS resolver can later turn the ji "address" into a real one.
 */
2940 inline static MonoInst*
2941 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2942 MonoMethodSignature *sig, MonoInst **args)
2944 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2948 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2951 if (cfg->abs_patches == NULL)
2952 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2953 g_hash_table_insert (cfg->abs_patches, ji, ji);
2954 ins = mono_emit_native_call (cfg, ji, sig, args);
2955 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 * Whether icalls may be invoked directly (without the managed wrapper).
 * Disabled under LLVM (non-32-bit address limitation noted below), when
 * sequence points are generated for the debugger, or when explicitly
 * disabled.  Return statements are not visible in this extract.
 */
2960 direct_icalls_enabled (MonoCompile *cfg)
2962 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2964 if (cfg->compile_llvm)
2967 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call for icall INFO, inlining the (lazily created) managed wrapper
 * when the icall cannot raise and direct icalls are enabled; otherwise fall
 * back to calling through the wrapper.  OUT_CBB receives the current basic
 * block after inlining.
 */
2973 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
2976 * Call the jit icall without a wrapper if possible.
2977 * The wrapper is needed for the following reasons:
2978 * - to handle exceptions thrown using mono_raise_exceptions () from the
2979 * icall function. The EH code needs the lmf frame pushed by the
2980 * wrapper to be able to unwind back to managed code.
2981 * - to be able to do stack walks for asynchronously suspended
2982 * threads when debugging.
2984 if (info->no_raise && direct_icalls_enabled (cfg)) {
2988 if (!info->wrapper_method) {
/* Lazily create and publish the wrapper; barrier orders the store. */
2989 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2990 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2992 mono_memory_barrier ();
2996 * Inline the wrapper method, which is basically a call to the C icall, and
2997 * an exception check.
2999 costs = inline_method (cfg, info->wrapper_method, NULL,
3000 args, NULL, cfg->real_offset, TRUE, out_cbb);
3001 g_assert (costs > 0);
3002 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3006 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * After a pinvoke (or LLVM) call, sign/zero-extend small-integer return
 * values, since native code may leave the upper register bits
 * uninitialized.  Returns the (possibly widened) result instruction.
 */
3011 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3013 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3014 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3018 * Native code might return non register sized integers
3019 * without initializing the upper bits.
/* Pick the widening conversion matching the return's load width. */
3021 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3022 case OP_LOADI1_MEMBASE:
3023 widen_op = OP_ICONV_TO_I1;
3025 case OP_LOADU1_MEMBASE:
3026 widen_op = OP_ICONV_TO_U1;
3028 case OP_LOADI2_MEMBASE:
3029 widen_op = OP_ICONV_TO_I2;
3031 case OP_LOADU2_MEMBASE:
3032 widen_op = OP_ICONV_TO_U2;
3038 if (widen_op != -1) {
3039 int dreg = alloc_preg (cfg);
3042 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3043 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and cache) the managed String.memcpy(dst, src, n) helper from
 * corlib; aborts if the running corlib does not provide it.
 */
3053 get_memcpy_method (void)
3055 static MonoMethod *memcpy_method = NULL;
3056 if (!memcpy_method) {
3057 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3059 g_error ("Old corlib found. Install a new one");
3061 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively build a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) of the reference fields in KLASS, for use by the value-copy write
 * barrier.  Static fields are skipped; nested valuetypes with references
 * recurse with their field offset added.
 */
3065 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3067 MonoClassField *field;
3068 gpointer iter = NULL;
3070 while ((field = mono_class_get_fields (klass, &iter))) {
3073 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
3075 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3076 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3077 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3078 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3080 MonoClass *field_class = mono_class_from_mono_type (field->type);
3081 if (field_class->has_references)
3082 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR.  Prefers the
 * arch-specific card-table barrier opcode, falls back to inline card-table
 * marking, and finally to calling the generic write-barrier method.  No-op
 * when write barriers are not being generated.
 */
3088 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3090 int card_table_shift_bits;
3091 gpointer card_table_mask;
3093 MonoInst *dummy_use;
3094 int nursery_shift_bits;
3095 size_t nursery_size;
3096 gboolean has_card_table_wb = FALSE;
3098 if (!cfg->gen_write_barriers)
3101 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3103 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3105 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3106 has_card_table_wb = TRUE;
/* Fast path: dedicated card-table barrier opcode. */
3109 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3112 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3113 wbarrier->sreg1 = ptr->dreg;
3114 wbarrier->sreg2 = value->dreg;
3115 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = table[ptr >> shift]; *card = 1. */
3116 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3117 int offset_reg = alloc_preg (cfg);
3118 int card_reg = alloc_preg (cfg);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3122 if (card_table_mask)
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3125 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3126 * IMM's larger than 32bits.
3128 if (cfg->compile_aot) {
3129 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3131 MONO_INST_NEW (cfg, ins, OP_PCONST);
3132 ins->inst_p0 = card_table;
3133 ins->dreg = card_reg;
3134 MONO_ADD_INS (cfg->cbb, ins);
3137 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3138 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided write barrier method. */
3140 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3141 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3144 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit an unrolled, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0] (pointer-sized chunks, with a barrier per
 * reference slot).  Large copies go through the
 * mono_gc_wbarrier_value_copy_bitmap icall instead.  Returns a
 * success/failure indication to the caller (return statements not visible
 * in this extract).
 */
3148 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3150 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3151 unsigned need_wb = 0;
3156 /*types with references can't have alignment smaller than sizeof(void*) */
3157 if (align < SIZEOF_VOID_P)
3160 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3161 if (size > 32 * SIZEOF_VOID_P)
3164 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3166 /* We don't unroll more than 5 stores to avoid code bloat. */
3167 if (size > 5 * SIZEOF_VOID_P) {
3168 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3169 size += (SIZEOF_VOID_P - 1);
3170 size &= ~(SIZEOF_VOID_P - 1);
3172 EMIT_NEW_ICONST (cfg, iargs [2], size);
3173 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3174 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3178 destreg = iargs [0]->dreg;
3179 srcreg = iargs [1]->dreg;
3182 dest_ptr_reg = alloc_preg (cfg);
3183 tmp_reg = alloc_preg (cfg);
3186 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized words, emitting a barrier where the bitmap says so. */
3188 while (size >= SIZEOF_VOID_P) {
3189 MonoInst *load_inst;
3190 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3191 load_inst->dreg = tmp_reg;
3192 load_inst->inst_basereg = srcreg;
3193 load_inst->inst_offset = offset;
3194 MONO_ADD_INS (cfg->cbb, load_inst);
3196 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3199 emit_write_barrier (cfg, iargs [0], load_inst);
3201 offset += SIZEOF_VOID_P;
3202 size -= SIZEOF_VOID_P;
3205 /*tmp += sizeof (void*)*/
3206 if (size >= SIZEOF_VOID_P) {
3207 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3208 MONO_ADD_INS (cfg->cbb, iargs [0]);
3212 /* Those cannot be references since size < sizeof (void*) */
3214 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3215 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3221 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3228 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3229 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3238 * Emit code to copy a valuetype of type @klass whose address is stored in
3239 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native layout/size (no references allowed then).  Handles
 * gsharedvt klasses via runtime-provided size/memcpy, write-barriered
 * copies for reference-carrying types, small inline memcpy, and finally a
 * call to the managed memcpy helper.
 */
3242 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3244 MonoInst *iargs [4];
3247 MonoMethod *memcpy_method;
3248 MonoInst *size_ins = NULL;
3249 MonoInst *memcpy_ins = NULL;
3252 if (cfg->generic_sharing_context)
3253 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3256 * This check breaks with spilled vars... need to handle it during verification anyway.
3257 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine come from the runtime generic context. */
3260 if (mini_is_gsharedvt_klass (cfg, klass)) {
3262 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3263 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3267 n = mono_class_native_size (klass, &align);
3269 n = mono_class_value_size (klass, &align);
3271 /* if native is true there should be no references in the struct */
3272 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3273 /* Avoid barriers when storing to the stack */
3274 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3275 (dest->opcode == OP_LDADDR))) {
3281 context_used = mini_class_check_context_used (cfg, klass);
3283 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3284 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3286 } else if (context_used) {
3287 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3289 if (cfg->compile_aot) {
3290 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3292 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3293 mono_class_compute_gc_descriptor (klass);
3298 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3300 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small fixed sizes. */
3305 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3306 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3307 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3312 iargs [2] = size_ins;
3314 EMIT_NEW_ICONST (cfg, iargs [2], n);
3316 memcpy_method = get_memcpy_method ();
3318 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3320 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and cache) the managed String.memset helper from corlib; aborts
 * if the running corlib does not provide it.
 */
3325 get_memset_method (void)
3327 static MonoMethod *memset_method = NULL;
3328 if (!memset_method) {
3329 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3331 g_error ("Old corlib found. Install a new one");
3333 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of type KLASS at DEST->dreg.
 * gsharedvt klasses use a runtime-supplied size and a bzero helper called
 * indirectly; otherwise small types are zeroed inline and larger ones via
 * the managed memset helper.
 */
3337 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3339 MonoInst *iargs [3];
3342 MonoMethod *memset_method;
3343 MonoInst *size_ins = NULL;
3344 MonoInst *bzero_ins = NULL;
3345 static MonoMethod *bzero_method;
3347 /* FIXME: Optimize this for the case when dest is an LDADDR */
3348 mono_class_init (klass);
3349 if (mini_is_gsharedvt_klass (cfg, klass)) {
3350 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3351 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3353 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3354 g_assert (bzero_method);
3356 iargs [1] = size_ins;
3357 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3361 n = mono_class_value_size (klass, &align);
/* Small enough: zero inline instead of calling out. */
3363 if (n <= sizeof (gpointer) * 8) {
3364 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3367 memset_method = get_memset_method ();
3369 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3370 EMIT_NEW_ICONST (cfg, iargs [2], n);
3371 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context for METHOD: the mrgctx
 * variable for generic methods, the vtable variable for static/valuetype
 * methods (following class_vtable out of the mrgctx when needed), or the
 * vtable read from the 'this' argument otherwise.
 */
3376 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3378 MonoInst *this = NULL;
3380 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types can supply 'this' for the vtable. */
3382 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3383 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3384 !method->klass->valuetype)
3385 EMIT_NEW_ARGLOAD (cfg, this, 0);
3387 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3388 MonoInst *mrgctx_loc, *mrgctx_var;
3391 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3393 mrgctx_loc = mono_get_vtable_var (cfg);
3394 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3397 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3398 MonoInst *vtable_loc, *vtable_var;
3402 vtable_loc = mono_get_vtable_var (cfg);
3403 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3405 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3406 MonoInst *mrgctx_var = vtable_var;
3409 vtable_reg = alloc_preg (cfg);
3410 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3411 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable out of the receiver object. */
3419 vtable_reg = alloc_preg (cfg);
3420 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate an rgctx-fetch descriptor from MP: the requesting METHOD,
 * whether the lookup goes through an mrgctx, the embedded patch
 * (PATCH_TYPE/PATCH_DATA) identifying the looked-up entity, and the kind of
 * information requested (INFO_TYPE).
 */
3425 static MonoJumpInfoRgctxEntry *
3426 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3428 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3429 res->method = method;
3430 res->in_mrgctx = in_mrgctx;
3431 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3432 res->data->type = patch_type;
3433 res->data->data.target = patch_data;
3434 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY,
 * passing RGCTX as the single argument.
 */
3439 static inline MonoInst*
3440 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3442 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR that fetches RGCTX_TYPE information about KLASS from the runtime
 * generic context of the current method.
 */
3446 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3447 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3449 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3450 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3452 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Like emit_get_rgctx_klass (), but the looked-up entity is a method
 * signature (MONO_PATCH_INFO_SIGNATURE).
 */
3456 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3457 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3459 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3460 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3462 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Fetch rgctx information describing a gsharedvt call site: the descriptor
 * packs the call signature and the target method.
 */
3466 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3467 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3469 MonoJumpInfoGSharedVtCall *call_info;
3470 MonoJumpInfoRgctxEntry *entry;
3473 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3474 call_info->sig = sig;
3475 call_info->method = cmethod;
3477 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3478 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3480 return emit_rgctx_fetch (cfg, rgctx, entry);
3484 * emit_get_rgctx_virt_method:
3486 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* The klass/method pair is packed into a MonoJumpInfoVirtMethod descriptor
 * and resolved through the rgctx fetch trampoline. */
3489 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3490 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3492 MonoJumpInfoVirtMethod *info;
3493 MonoJumpInfoRgctxEntry *entry;
3496 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3497 info->klass = klass;
3498 info->method = virt_method;
3500 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3501 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3503 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Fetch the gsharedvt per-method info structure for CMETHOD from the
 * runtime generic context.
 */
3507 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3508 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3510 MonoJumpInfoRgctxEntry *entry;
3513 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3514 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3516 return emit_rgctx_fetch (cfg, rgctx, entry);
3520 * emit_get_rgctx_method:
3522 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3523 * normal constants, else emit a load from the rgctx.
3526 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3527 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at compile time, use constants. */
3529 if (!context_used) {
3532 switch (rgctx_type) {
3533 case MONO_RGCTX_INFO_METHOD:
3534 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3536 case MONO_RGCTX_INFO_METHOD_RGCTX:
3537 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3540 g_assert_not_reached ();
/* Shared case: go through the rgctx fetch trampoline. */
3543 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3544 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3546 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Fetch RGCTX_TYPE information about FIELD from the runtime generic
 * context of the current method.
 */
3551 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3552 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3554 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3555 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3557 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry in cfg->gsharedvt_info describing
 * (DATA, RGCTX_TYPE), appending a new entry if none exists yet.
 */
3561 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3563 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3564 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot if possible; LOCAL_OFFSET entries are never shared. */
3569 for (i = 0; i < info->num_entries; ++i) {
3570 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3572 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). It is mempool
 * allocated, so the old copy is simply abandoned, not freed. */
3576 if (info->num_entries == info->count_entries) {
3577 MonoRuntimeGenericContextInfoTemplate *new_entries;
3578 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3580 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3582 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3583 info->entries = new_entries;
3584 info->count_entries = new_count_entries;
/* Append the new entry and return its index. */
3587 idx = info->num_entries;
3588 template = &info->entries [idx];
3589 template->info_type = rgctx_type;
3590 template->data = data;
3592 info->num_entries ++;
3598 * emit_get_gsharedvt_info:
3600 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3603 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Map (data, rgctx_type) to a slot in the per-method gsharedvt info. */
3608 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3609 /* Load info->entries [idx] */
3610 dreg = alloc_preg (cfg);
3611 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: look up gsharedvt info for KLASS's byval type. */
3617 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3619 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3623 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit IR to run the class initializer for KLASS.  The vtable is obtained
 * from the rgctx when KLASS depends on the generic context, otherwise it is
 * emitted as a constant.
 */
3626 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3628 MonoInst *vtable_arg;
3631 context_used = mini_class_check_context_used (cfg, klass);
3634 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3635 klass, MONO_RGCTX_INFO_VTABLE);
3637 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3641 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Preferred path: a dedicated opcode the backend lowers itself. */
3644 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3648 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3649 * so this doesn't have to clobber any regs.
3652 * For LLVM, this requires that the code in the generic trampoline obtain the vtable argument according to
3653 * the normal calling convention of the platform.
3655 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3656 ins->sreg1 = vtable_arg->dreg;
3657 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: call the generic class init trampoline, passing the vtable in
 * the architecture's dedicated vtable register. */
3661 if (COMPILE_LLVM (cfg))
3662 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3664 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3665 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3666 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IP.  Only done when sequence points are
 * enabled and METHOD is the method actually being compiled (not an inlinee).
 */
3671 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3675 if (cfg->gen_seq_points && cfg->method == method) {
3676 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* NOTE(review): presumably guarded by 'if (nonempty_stack)' on an elided line -- confirm. */
3678 ins->flags |= MONO_INST_NONEMPTY_STACK;
3679 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source class (read
 * from OBJ_REG's vtable) and the target KLASS into the JIT TLS area
 * (class_cast_from/class_cast_to) so a failing cast can report both types.
 * If NULL_CHECK, the recording is skipped for null objects.  *OUT_BBLOCK is
 * set to the current basic block on return.
 */
3684 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3686 if (mini_get_debug_options ()->better_cast_details) {
3687 int vtable_reg = alloc_preg (cfg);
3688 int klass_reg = alloc_preg (cfg);
3689 MonoBasicBlock *is_null_bb = NULL;
3691 int to_klass_reg, context_used;
3694 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping when the object is null. */
3696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3700 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message ends in "\n." -- the trailing period after the
 * newline looks like a typo; confirm before changing user-visible output. */
3702 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3706 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's dynamic class as the cast source. */
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class; loaded from the rgctx in shared code. */
3712 context_used = mini_class_check_context_used (cfg, klass);
3714 MonoInst *class_ins;
3716 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3717 to_klass_reg = class_ins->dreg;
3719 to_klass_reg = alloc_preg (cfg);
3720 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3725 MONO_START_BB (cfg, is_null_bb);
3727 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Undo save_cast_details () after a cast has succeeded, so stale data is not
 * reported for a later, unrelated cast failure.
 */
3733 reset_cast_details (MonoCompile *cfg)
3735 /* Reset the variables holding the cast details */
3736 if (mini_get_debug_options ()->better_cast_details) {
3737 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3739 MONO_ADD_INS (cfg->cbb, tls_get);
3740 /* It is enough to reset the from field */
3741 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3746 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR checking that OBJ is exactly an instance of ARRAY_CLASS (used for
 * array covariance stores), throwing ArrayTypeMismatchException otherwise.
 * Compares either the class (MONO_OPT_SHARED) or the vtable, with rgctx/AOT
 * variants depending on how the compared value can be materialized.
 */
3749 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3751 int vtable_reg = alloc_preg (cfg);
3754 context_used = mini_class_check_context_used (cfg, array_class);
3756 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3758 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares MonoClass pointers. */
3760 if (cfg->opt & MONO_OPT_SHARED) {
3761 int class_reg = alloc_preg (cfg);
3762 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3763 if (cfg->compile_aot) {
3764 int klass_reg = alloc_preg (cfg);
3765 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3766 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic-sharing code loads the expected vtable from the rgctx. */
3770 } else if (context_used) {
3771 MonoInst *vtable_ins;
3773 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3774 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Otherwise compare against the vtable directly (AOT const vs. immediate). */
3776 if (cfg->compile_aot) {
3780 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3782 vt_reg = alloc_preg (cfg);
3783 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3784 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3787 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3793 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3795 reset_cast_details (cfg);
3799 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3800 * generic code is generated.
/* Calls Nullable<T>.Unbox on VAL: an indirect call through an rgctx-provided
 * code address in shared code, a direct call (optionally passing the vtable)
 * otherwise. */
3803 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3805 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3808 MonoInst *rgctx, *addr;
3810 /* FIXME: What if the class is shared? We might not
3811 have to get the address of the method from the
3813 addr = emit_get_rgctx_method (cfg, context_used, method,
3814 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3816 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3818 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call; pass the vtable when the method requires it. */
3820 gboolean pass_vtable, pass_mrgctx;
3821 MonoInst *rgctx_arg = NULL;
3823 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3824 g_assert (!pass_mrgctx);
3827 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3830 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3833 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR unboxing SP [0] to KLASS (non-nullable valuetype): check the
 * object is not an array (rank == 0) and that its element class matches,
 * then return the address just past the MonoObject header.
 */
3838 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3842 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3843 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3844 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3845 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3847 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3848 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3849 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3851 /* FIXME: generics */
3852 g_assert (klass->rank == 0);
/* The object must not be an array. */
3855 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3856 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3858 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class loaded from the rgctx. */
3862 MonoInst *element_class;
3864 /* This assertion is from the unboxcast insn */
3865 g_assert (klass->rank == 0);
3867 element_class = emit_get_rgctx_klass (cfg, context_used,
3868 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3870 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3871 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: the element class is known at JIT time. */
3873 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3874 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3875 reset_cast_details (cfg);
/* Result: pointer to the value data, right after the object header. */
3878 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3879 MONO_ADD_INS (cfg->cbb, add);
3880 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR unboxing OBJ when KLASS is a gsharedvt type whose concrete kind is
 * only known at run time.  Branches on the CLASS_BOX_TYPE rgctx value (per the
 * compares below: 1 = reference type, 2 = nullable, otherwise vtype) and
 * returns a load of the unboxed value; ADDR_REG holds either the address of
 * the unboxed vtype or of a temporary holding the reference.
 */
3887 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3889 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3890 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3894 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check via icall (klass is only known at run time). */
3900 args [1] = klass_inst;
3903 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3905 NEW_BBLOCK (cfg, is_ref_bb);
3906 NEW_BBLOCK (cfg, is_nullable_bb);
3907 NEW_BBLOCK (cfg, end_bb);
3908 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3913 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3915 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3916 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: address is right past the object header. */
3920 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3921 MONO_ADD_INS (cfg->cbb, addr);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: spill the ref to a temporary and use its address. */
3926 MONO_START_BB (cfg, is_ref_bb);
3928 /* Save the ref to a temporary */
3929 dreg = alloc_ireg (cfg);
3930 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3931 addr->dreg = addr_reg;
3932 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Unbox through an rgctx-provided address,
 * using a hand-built one-argument signature. */
3936 MONO_START_BB (cfg, is_nullable_bb);
3939 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3940 MonoInst *unbox_call;
3941 MonoMethodSignature *unbox_sig;
3943 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3944 unbox_sig->ret = &klass->byval_arg;
3945 unbox_sig->param_count = 1;
3946 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3947 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3949 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3950 addr->dreg = addr_reg;
3953 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* All paths join here: load the value through the computed address. */
3956 MONO_START_BB (cfg, end_bb);
3959 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3961 *out_cbb = cfg->cbb;
3967 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an object of KLASS (FOR_BOX when the result will be
 * used for boxing).  Chooses between a managed allocator, a jit icall
 * (mono_object_new / mono_object_new_specific), an mscorlib token helper for
 * out-of-line AOT code, or a class-specific allocation function.
 */
3970 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3972 MonoInst *iargs [2];
/* Shared (context_used) path: the klass/vtable comes from the rgctx. */
3978 MonoInst *iargs [2];
3979 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3981 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3983 if (cfg->opt & MONO_OPT_SHARED)
3984 rgctx_info = MONO_RGCTX_INFO_KLASS;
3986 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3987 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3989 if (cfg->opt & MONO_OPT_SHARED) {
3990 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3992 alloc_ftn = mono_object_new;
3995 alloc_ftn = mono_object_new_specific;
/* Prefer the inline managed allocator when the instance size is known. */
3998 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3999 if (known_instance_size) {
4000 int size = mono_class_instance_size (klass);
4002 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4004 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4007 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths follow. */
4010 if (cfg->opt & MONO_OPT_SHARED) {
4011 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4012 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4014 alloc_ftn = mono_object_new;
4015 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4016 /* This happens often in argument checking code, eg. throw new FooException... */
4017 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4018 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4019 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4021 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4022 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: surface a TypeLoadException. */
4026 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4027 cfg->exception_ptr = klass;
4031 #ifndef MONO_CROSS_COMPILE
4032 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4035 if (managed_alloc) {
4036 int size = mono_class_instance_size (klass);
4038 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4039 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4040 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Class-specific allocator; pass_lw decides whether the size in
 * pointer-words is passed as the first argument. */
4042 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4044 guint32 lw = vtable->klass->instance_size;
4045 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4046 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4047 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4050 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4054 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4058 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL to KLASS.  Nullable types go through Nullable<T>.Box;
 * gsharedvt types branch at run time on CLASS_BOX_TYPE (1 = reference,
 * 2 = nullable, otherwise vtype); everything else is alloc + store.
 * *OUT_CBB is set to the current basic block on return.
 */
4061 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4063 MonoInst *alloc, *ins;
4065 *out_cbb = cfg->cbb;
4067 if (mono_class_is_nullable (klass)) {
4068 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
/* Shared code: call Box indirectly through an rgctx-provided address. */
4071 /* FIXME: What if the class is shared? We might not
4072 have to get the method address from the RGCTX. */
4073 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4074 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4075 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4077 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, passing the vtable when required. */
4079 gboolean pass_vtable, pass_mrgctx;
4080 MonoInst *rgctx_arg = NULL;
4082 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4083 g_assert (!pass_mrgctx);
4086 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4089 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4092 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the concrete kind of KLASS is only known at run time. */
4096 if (mini_is_gsharedvt_klass (cfg, klass)) {
4097 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4098 MonoInst *res, *is_ref, *src_var, *addr;
4101 dreg = alloc_ireg (cfg);
4103 NEW_BBLOCK (cfg, is_ref_bb);
4104 NEW_BBLOCK (cfg, is_nullable_bb);
4105 NEW_BBLOCK (cfg, end_bb);
4106 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4108 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4110 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4111 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value past the object header. */
4114 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4117 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4118 ins->opcode = OP_STOREV_MEMBASE;
4120 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4121 res->type = STACK_OBJ;
4123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: boxing a reference is the identity; reload it via the
 * vreg's backing variable. */
4126 MONO_START_BB (cfg, is_ref_bb);
4128 /* val is a vtype, so has to load the value manually */
4129 src_var = get_vreg_to_inst (cfg, val->dreg);
4131 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4132 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Box through an rgctx-provided address. */
4137 MONO_START_BB (cfg, is_nullable_bb);
4140 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4141 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4143 MonoMethodSignature *box_sig;
4146 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4147 * construct that method at JIT time, so have to do things by hand.
4149 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4150 box_sig->ret = &mono_defaults.object_class->byval_arg;
4151 box_sig->param_count = 1;
4152 box_sig->params [0] = &klass->byval_arg;
4153 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4154 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4155 res->type = STACK_OBJ;
4159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4161 MONO_START_BB (cfg, end_bb);
4163 *out_cbb = cfg->cbb;
/* Plain vtype: allocate the box object and store the value into it. */
4167 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, with CONTEXT_USED, an open
 * generic) with at least one covariant/contravariant type argument that is a
 * reference type — such casts need the variance-aware cache helpers.
 */
4177 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4180 MonoGenericContainer *container;
4181 MonoGenericInst *ginst;
4183 if (klass->generic_class) {
4184 container = klass->generic_class->container_class->generic_container;
4185 ginst = klass->generic_class->context.class_inst;
4186 } else if (klass->generic_container && context_used) {
4187 container = klass->generic_container;
4188 ginst = container->context.class_inst;
4193 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter. */
4195 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4197 type = ginst->type_argv [i];
4198 if (mini_type_is_reference (cfg, type))
/* Lazily-built whitelist of corlib class names whose icalls are direct-callable. */
4204 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall may be called directly (no wrapper).
 */
4207 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4209 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4210 if (!direct_icalls_enabled (cfg))
4214 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4215 * Whitelist a few icalls for now.
4217 if (!direct_icall_type_hash) {
4218 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4220 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4221 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4222 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4223 mono_memory_barrier ();
4224 direct_icall_type_hash = h;
4227 if (cmethod->klass == mono_defaults.math_class)
4229 /* No locking needed */
4230 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * A "complex" isinst/castclass target needs the out-of-line cache helper:
 * interfaces, arrays, nullables, MarshalByRef classes, sealed classes and
 * open type variables (VAR/MVAR).
 * NOTE(review): this macro evaluates 'klass' multiple times — only pass
 * side-effect-free arguments.
 */
4235 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper on ARGS,
 * bracketed by save/reset of the cast-failure details.
 */
4238 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4240 MonoMethod *mono_castclass;
4243 mono_castclass = mono_marshal_get_castclass_with_cache ();
4245 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4246 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4247 reset_cast_details (cfg);
4248 *out_bblock = cfg->cbb;
/* Return a fresh cache index unique to this call site, combining the method
 * index (high 16 bits) with a per-method counter. */
4254 get_castclass_cache_idx (MonoCompile *cfg)
4256 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4257 cfg->castclass_cache_index ++;
4258 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: build the (obj, klass, cache) argument triple with a
 * class constant and a per-call-site cache slot (AOT patch or domain-allocated
 * pointer), then defer to emit_castclass_with_cache ().
 */
4262 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4271 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4274 if (cfg->compile_aot) {
4275 idx = get_castclass_cache_idx (cfg);
4276 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4278 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4281 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4283 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4287 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' of SRC to KLASS, throwing
 * InvalidCastException on mismatch.  Picks between the cache helper, the
 * castclass marshal wrapper (inlined), and inline vtable/class compares;
 * *INLINE_COSTS is bumped for the inlined paths.
 */
4290 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4292 MonoBasicBlock *is_null_bb;
4293 int obj_reg = src->dreg;
4294 int vtable_reg = alloc_preg (cfg);
4296 MonoInst *klass_inst = NULL, *res;
4297 MonoBasicBlock *bblock;
4301 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared, variance involved: use the cache helper. */
4303 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4304 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4305 (*inline_costs) += 2;
/* Non-shared MBR/interface: inline the castclass marshal wrapper. */
4308 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4309 MonoMethod *mono_castclass;
4310 MonoInst *iargs [1];
4313 mono_castclass = mono_marshal_get_castclass (klass);
4316 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4317 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4318 iargs, ip, cfg->real_offset, TRUE, &bblock);
4319 reset_cast_details (cfg);
4320 CHECK_CFG_EXCEPTION;
4321 g_assert (costs > 0);
4323 cfg->real_offset += 5;
4325 (*inline_costs) += costs;
/* Shared code with a complex/variant target: cache helper with the cache
 * entry (and its klass field) loaded from the rgctx. */
4334 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4335 MonoInst *cache_ins;
4337 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4342 /* klass - it's the second element of the cache entry*/
4343 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4346 args [2] = cache_ins;
4348 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4351 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass. */
4354 NEW_BBLOCK (cfg, is_null_bb);
4356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4359 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4361 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4363 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4365 int klass_reg = alloc_preg (cfg);
4367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array, JIT-only: a single class/vtable compare suffices. */
4369 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4370 /* the remoting code is broken, access the class for now */
4371 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4372 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4374 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4375 cfg->exception_ptr = klass;
4378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4380 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4383 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4386 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4390 MONO_START_BB (cfg, is_null_bb);
4392 reset_cast_details (cfg);
4403 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL 'isinst' of SRC against KLASS: the result register
 * holds the object on success, NULL on failure.  Complex/variant targets use
 * the isinst-with-cache wrapper; the rest is emitted inline with dedicated
 * paths for interfaces, arrays, nullables and sealed classes.
 */
4406 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4409 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4410 int obj_reg = src->dreg;
4411 int vtable_reg = alloc_preg (cfg);
4412 int res_reg = alloc_ireg_ref (cfg);
4413 MonoInst *klass_inst = NULL;
/* Cache-helper path for complex or variant targets. */
4418 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4419 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4420 MonoInst *cache_ins;
4422 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4427 /* klass - it's the second element of the cache entry*/
4428 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4431 args [2] = cache_ins;
4433 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4436 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4439 NEW_BBLOCK (cfg, is_null_bb);
4440 NEW_BBLOCK (cfg, false_bb);
4441 NEW_BBLOCK (cfg, end_bb);
4443 /* Do the assignment at the beginning, so the other assignment can be if converted */
4444 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4445 ins->type = STACK_OBJ;
/* isinst (null) yields null (via is_null_bb, which keeps res_reg = obj). */
4448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4453 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4454 g_assert (!context_used);
4455 /* the is_null_bb target simply copies the input register to the output */
4456 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4458 int klass_reg = alloc_preg (cfg);
/* Array targets: check rank, then the element (cast) class, with special
 * handling for enums and interface element types. */
4461 int rank_reg = alloc_preg (cfg);
4462 int eclass_reg = alloc_preg (cfg);
4464 g_assert (!context_used);
4465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4467 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4470 if (klass->cast_class == mono_defaults.object_class) {
4471 int parent_reg = alloc_preg (cfg);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4473 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4474 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4476 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4477 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4478 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4479 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4480 } else if (klass->cast_class == mono_defaults.enum_class) {
4481 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4482 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4483 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4484 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4486 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4487 /* Check that the object is a vector too */
4488 int bounds_reg = alloc_preg (cfg);
4489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4494 /* the is_null_bb target simply copies the input register to the output */
4495 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4497 } else if (mono_class_is_nullable (klass)) {
4498 g_assert (!context_used);
4499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4500 /* the is_null_bb target simply copies the input register to the output */
4501 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed, JIT-only: a single class/vtable compare suffices. */
4503 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4504 g_assert (!context_used);
4505 /* the remoting code is broken, access the class for now */
4506 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4507 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4509 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4510 cfg->exception_ptr = klass;
4513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk (class possibly from the rgctx). */
4521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4522 /* the is_null_bb target simply copies the input register to the output */
4523 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result is NULL. */
4528 MONO_START_BB (cfg, false_bb);
4530 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4533 MONO_START_BB (cfg, is_null_bb);
4535 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR implementing the CISINST (conditional isinst) opcode: classify the
 * object in SRC against KLASS. The integer result encodes the outcome as
 * documented in the comment below; result 2 can only be produced on the
 * remoting (transparent proxy) paths.
 * NOTE(review): this listing has gaps in the embedded line numbering, so
 * matching #else/#endif lines, some braces and the final return are not
 * visible here.
 */
4541 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4543 /* This opcode takes as input an object reference and a class, and returns:
4544 0) if the object is an instance of the class,
4545 1) if the object is not instance of the class,
4546 2) if the object is a proxy whose type cannot be determined */
/* Extra basic blocks are only needed for the transparent-proxy paths. */
4549 #ifndef DISABLE_REMOTING
4550 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4552 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4554 int obj_reg = src->dreg;
4555 int dreg = alloc_ireg (cfg);
4557 #ifndef DISABLE_REMOTING
4558 int klass_reg = alloc_preg (cfg);
4561 NEW_BBLOCK (cfg, true_bb);
4562 NEW_BBLOCK (cfg, false_bb);
4563 NEW_BBLOCK (cfg, end_bb);
4564 #ifndef DISABLE_REMOTING
4565 NEW_BBLOCK (cfg, false2_bb);
4566 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: jump straight to false_bb (result 1). */
4569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try the fast interface-table test on the vtable first. */
4572 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4573 #ifndef DISABLE_REMOTING
4574 NEW_BBLOCK (cfg, interface_fail_bb);
4577 tmp_reg = alloc_preg (cfg);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4579 #ifndef DISABLE_REMOTING
4580 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4581 MONO_START_BB (cfg, interface_fail_bb);
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Iface test failed and it is not a proxy: plain "not an instance". */
4584 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info: the JIT cannot decide statically -> result 2. */
4586 tmp_reg = alloc_preg (cfg);
4587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* presumably the non-remoting #else branch — the #else line itself is not visible. */
4591 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: inspect the class stored in the object's vtable. */
4594 #ifndef DISABLE_REMOTING
4595 tmp_reg = alloc_preg (cfg);
4596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4599 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class' proxy_class instead. */
4600 tmp_reg = alloc_preg (cfg);
4601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxies without custom type info behave like plain objects. */
4604 tmp_reg = alloc_preg (cfg);
4605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4609 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4612 MONO_START_BB (cfg, no_proxy_bb);
4614 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4616 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: not an instance -> 1. */
4620 MONO_START_BB (cfg, false_bb);
4622 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb: undecidable proxy -> 2 (resolved later at runtime). */
4625 #ifndef DISABLE_REMOTING
4626 MONO_START_BB (cfg, false2_bb);
4628 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4629 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: is an instance -> 0. */
4632 MONO_START_BB (cfg, true_bb);
4634 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4636 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4-typed instruction for the eval stack. */
4639 MONO_INST_NEW (cfg, ins, OP_ICONST);
4641 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR implementing the CCASTCLASS (conditional castclass) opcode for
 * KLASS against the object in SRC. Result encoding is documented in the
 * comment below; result 1 is only produced on the remoting paths.
 * NOTE(review): the embedded line numbering has gaps, so matching
 * #else/#endif lines, some braces and the final return are not visible here.
 */
4647 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4649 /* This opcode takes as input an object reference and a class, and returns:
4650 0) if the object is an instance of the class,
4651 1) if the object is a proxy whose type cannot be determined
4652 an InvalidCastException exception is thrown otherwise*/
4655 #ifndef DISABLE_REMOTING
4656 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4658 MonoBasicBlock *ok_result_bb;
4660 int obj_reg = src->dreg;
4661 int dreg = alloc_ireg (cfg);
4662 int tmp_reg = alloc_preg (cfg);
4664 #ifndef DISABLE_REMOTING
4665 int klass_reg = alloc_preg (cfg);
4666 NEW_BBLOCK (cfg, end_bb);
4669 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a useful exception. */
4674 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
/* Interface case: fast interface-table test, with a proxy fallback. */
4676 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4677 #ifndef DISABLE_REMOTING
4678 NEW_BBLOCK (cfg, interface_fail_bb);
4680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4681 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4682 MONO_START_BB (cfg, interface_fail_bb);
4683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* If the iface test failed on a non-proxy, this throws InvalidCastException. */
4685 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info: the cast cannot succeed -> throw. */
4687 tmp_reg = alloc_preg (cfg);
4688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4690 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision to runtime (result 1). */
4692 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* presumably the non-remoting #else branch — the #else line itself is not visible. */
4695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4696 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case: check the vtable's class, with a proxy detour. */
4700 #ifndef DISABLE_REMOTING
4701 NEW_BBLOCK (cfg, no_proxy_bb);
4703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4705 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class' proxy_class instead. */
4707 tmp_reg = alloc_preg (cfg);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxies without custom type info behave like plain objects. */
4711 tmp_reg = alloc_preg (cfg);
4712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4716 NEW_BBLOCK (cfg, fail_1_bb);
4718 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type could not be decided statically -> 1. */
4720 MONO_START_BB (cfg, fail_1_bb);
4722 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4725 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: emit the normal castclass check (throws on failure). */
4727 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4729 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> 0. */
4733 MONO_START_BB (cfg, ok_result_bb);
4735 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4737 #ifndef DISABLE_REMOTING
4738 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4-typed instruction for the eval stack. */
4742 MONO_INST_NEW (cfg, ins, OP_ICONST);
4744 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 * Emit an inlined Enum.HasFlag: load the enum value referenced by ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result back against ENUM_FLAG;
 * the destination holds 1 iff all requested flag bits are set.
 * The is_i4 selection (visible only through its uses below; its computation
 * falls in a numbering gap) chooses 32-bit vs 64-bit opcodes based on the
 * enum's underlying type.
 */
4749 static G_GNUC_UNUSED MonoInst*
4750 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4752 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4753 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Dispatch on the enum's underlying primitive type (cases not visible here). */
4756 switch (enum_type->type) {
4759 #if SIZEOF_REGISTER == 8
4771 MonoInst *load, *and, *cmp, *ceq;
4772 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4773 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4774 int dest_reg = alloc_ireg (cfg);
/* (this & flag) == flag  =>  ceq result in dest_reg. */
4776 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4777 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4778 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4779 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4781 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4784 load = mono_decompose_opcode (cfg, load, NULL);
4785 and = mono_decompose_opcode (cfg, and, NULL);
4786 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4787 ceq = mono_decompose_opcode (cfg, ceq, NULL);
4795 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor: allocate the delegate object,
 * fill in its target/method/method_code/invoke_impl fields, and install
 * the (possibly virtual) delegate trampoline. CONTEXT_USED != 0 means the
 * method is looked up through the RGCTX rather than embedded directly.
 */
4797 static G_GNUC_UNUSED MonoInst*
4798 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4802 gpointer trampoline;
4803 MonoInst *obj, *method_ins, *tramp_ins;
4808 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out if the backend has no virtual-invoke thunk for this signature. */
4811 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4815 obj = handle_alloc (cfg, klass, FALSE, 0);
4819 /* Inline the contents of mono_delegate_ctor */
4821 /* Set target field */
4822 /* Optimize away setting of NULL target */
4823 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into the heap needs a GC write barrier. */
4825 if (cfg->gen_write_barriers) {
4826 dreg = alloc_preg (cfg);
4827 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4828 emit_write_barrier (cfg, ptr, target);
4832 /* Set method field */
4833 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4834 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4837 * To avoid looking up the compiled code belonging to the target method
4838 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4839 * store it, and we fill it after the method has been compiled.
4841 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4842 MonoInst *code_slot_ins;
4845 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or reuse) the per-domain code slot under the domain lock. */
4847 domain = mono_domain_get ();
4848 mono_domain_lock (domain);
4849 if (!domain_jit_info (domain)->method_code_hash)
4850 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4851 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4853 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4854 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4856 mono_domain_unlock (domain);
/* Under AOT the slot address must be a patchable constant. */
4858 if (cfg->compile_aot)
4859 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4861 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: patchable pair under AOT, direct pointer otherwise. */
4866 if (cfg->compile_aot) {
4867 MonoDelegateClassMethodPair *del_tramp;
4869 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4870 del_tramp->klass = klass;
4871 del_tramp->method = context_used ? NULL : method;
4872 del_tramp->virtual = virtual;
4873 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4876 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4878 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4879 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4882 /* Set invoke_impl field */
4884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: copy invoke_impl/method_ptr out of the MonoDelegateTrampInfo. */
4886 dreg = alloc_preg (cfg);
4887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4890 dreg = alloc_preg (cfg);
4891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4895 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call to the mono_array_new_va icall for a RANK-dimensional array
 * allocation, with the dimension arguments already on SP.
 */
4901 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4903 MonoJitICallInfo *info;
4905 /* Need to register the icall so it gets an icall wrapper */
4906 info = mono_get_array_new_va_icall (rank);
4908 cfg->flags |= MONO_CFG_HAS_VARARGS;
4910 /* mono_array_new_va () needs a vararg calling convention */
4911 cfg->disable_llvm = TRUE;
4913 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4914 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4918 * handle_constrained_gsharedvt_call:
4920 * Handle constrained calls where the receiver is a gsharedvt type.
4921 * Return the instruction representing the call. Set the cfg exception on failure.
4924 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4925 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4927 MonoInst *ins = NULL;
4928 MonoBasicBlock *bblock = *ref_bblock;
4929 gboolean emit_widen = *ref_emit_widen;
4932 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4933 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4934 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported; everything else falls
 * through to GSHAREDVT_FAILURE below. */
4936 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4937 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4938 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4939 MonoInst *args [16];
4942 * This case handles calls to
4943 * - object:ToString()/Equals()/GetHashCode(),
4944 * - System.IComparable<T>:CompareTo()
4945 * - System.IEquatable<T>:Equals ()
4946 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = target method, looked up via RGCTX when generic context is used. */
4950 if (mono_method_check_context_used (cmethod))
4951 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4953 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
/* args[2] = the constrained class itself. */
4954 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4956 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4957 if (fsig->hasthis && fsig->param_count) {
4958 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4959 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4960 ins->dreg = alloc_preg (cfg);
4961 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4962 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus its box-type info in args[3]. */
4965 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4968 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4970 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4971 addr_reg = ins->dreg;
4972 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4974 EMIT_NEW_ICONST (cfg, args [3], 0);
4975 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: pass null marker and empty array slot. */
4978 EMIT_NEW_ICONST (cfg, args [3], 0);
4979 EMIT_NEW_ICONST (cfg, args [4], 0);
4981 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* Unbox/deref the boxed return value produced by the icall. */
4984 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4985 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4986 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed payload. */
4990 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4991 MONO_ADD_INS (cfg->cbb, add);
4993 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4994 MONO_ADD_INS (cfg->cbb, ins);
4995 /* ins represents the call result */
4998 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Write back the in/out parameters. */
5001 *ref_emit_widen = emit_widen;
5002 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 * If a GOT variable exists and has not yet been materialized, emit
 * OP_LOAD_GOTADDR at the very start of the entry basic block, and add a
 * dummy use in the exit block to keep the variable alive for the whole
 * method.
 */
5011 mono_emit_load_got_addr (MonoCompile *cfg)
5013 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var, or it was already set up. */
5015 if (!cfg->got_var || cfg->got_var_allocated)
5018 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5019 getaddr->cil_code = cfg->header->code;
5020 getaddr->dreg = cfg->got_var->dreg;
5022 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; MONO_ADD_INS appends. */
5023 if (cfg->bb_entry->code) {
5024 getaddr->next = cfg->bb_entry->code;
5025 cfg->bb_entry->code = getaddr;
5028 MONO_ADD_INS (cfg->bb_entry, getaddr);
5030 cfg->got_var_allocated = TRUE;
5033 * Add a dummy use to keep the got_var alive, since real uses might
5034 * only be generated by the back ends.
5035 * Add it to end_bblock, so the variable's lifetime covers the whole
5037 * It would be better to make the usage of the got var explicit in all
5038 * cases when the backend needs it (i.e. calls, throw etc.), so this
5039 * wouldn't be needed.
5041 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5042 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, read once from MONO_INLINELIMIT (or the
 * compile-time INLINE_LENGTH_LIMIT default). Not thread-safe init, but
 * worst case is a benign double initialization to the same values. */
5045 static int inline_limit;
5046 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects methods that are too large, synchronized, marked noinline,
 * MarshalByRef, use soft-float R4, or whose class would need a cctor call
 * inserted inside the inlined body.
 */
5049 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5051 MonoMethodHeaderSummary header;
5053 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5054 MonoMethodSignature *sig = mono_method_signature (method);
/* Global/per-cfg vetoes. */
5058 if (cfg->disable_inline)
5060 if (cfg->generic_sharing_context)
/* Cap recursion depth of nested inlining. */
5063 if (cfg->inline_depth > 10)
5066 #ifdef MONO_ARCH_HAVE_LMF_OPS
5067 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5068 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5069 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Fetch a cheap summary (code size etc.) without decoding the full header. */
5074 if (!mono_method_get_header_summary (method, &header))
5077 /*runtime, icall and pinvoke are checked by summary call*/
5078 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5079 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5080 (mono_class_is_marshalbyref (method->klass)) ||
5084 /* also consider num_locals? */
5085 /* Do the size check early to avoid creating vtables */
5086 if (!inline_limit_inited) {
5087 if (g_getenv ("MONO_INLINELIMIT"))
5088 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5090 inline_limit = INLINE_LENGTH_LIMIT;
5091 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5093 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5097 * if we can initialize the class of the method right away, we do,
5098 * otherwise we don't allow inlining if the class needs initialization,
5099 * since it would mean inserting a call to mono_runtime_class_init()
5100 * inside the inlined code
5102 if (!(cfg->opt & MONO_OPT_SHARED)) {
5103 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5104 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5105 vtable = mono_class_vtable (cfg->domain, method->klass);
5108 if (!cfg->compile_aot)
5109 mono_runtime_class_init (vtable);
5110 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5111 if (cfg->run_cctors && method->klass->has_cctor) {
5112 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5113 if (!method->klass->runtime_info)
5114 /* No vtable created yet */
5116 vtable = mono_class_vtable (cfg->domain, method->klass);
5119 /* This makes so that inline cannot trigger */
5120 /* .cctors: too many apps depend on them */
5121 /* running with a specific order... */
5122 if (! vtable->initialized)
5124 mono_runtime_class_init (vtable);
5126 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5127 if (!method->klass->runtime_info)
5128 /* No vtable created yet */
5130 vtable = mono_class_vtable (cfg->domain, method->klass);
5133 if (!vtable->initialized)
5138 * If we're compiling for shared code
5139 * the cctor will need to be run at aot method load time, for example,
5140 * or at the end of the compilation of the inlining method.
5142 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5146 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5147 if (mono_arch_is_soft_float ()) {
5149 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5151 for (i = 0; i < sig->param_count; ++i)
5152 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compilation blacklist (e.g. to break inline cycles). */
5157 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static field access in METHOD requires emitting a class
 * initialization (cctor) check for KLASS. JIT mode can consult the live
 * vtable; AOT cannot.
 */
5164 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5166 if (!cfg->compile_aot) {
/* Already initialized: no check needed. */
5168 if (vtable->initialized)
/* BeforeFieldInit classes only need the check when accessed from their own method. */
5172 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5173 if (cfg->method == method)
5177 if (!mono_class_needs_cctor_run (klass, method))
5180 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5181 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit the address of element INDEX of single-dimensional array ARR with
 * element class KLASS: &arr->vector + index * element_size. BCHECK (used in
 * a line that falls in a numbering gap near the bounds-check emission)
 * controls the bounds check. Handles gsharedvt element sizes via the RGCTX
 * and uses an x86/amd64 LEA fast path for power-of-two sizes.
 */
5188 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5192 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size (gsharedvt) element: size comes from the RGCTX below. */
5195 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5198 mono_class_init (klass);
5199 size = mono_class_array_element_size (klass);
5202 mult_reg = alloc_preg (cfg);
5203 array_reg = arr->dreg;
5204 index_reg = index->dreg;
5206 #if SIZEOF_REGISTER == 8
5207 /* The array reg is 64 bits but the index reg is only 32 */
5208 if (COMPILE_LLVM (cfg)) {
5210 index2_reg = index_reg;
5212 index2_reg = alloc_preg (cfg);
5213 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index down to I4. */
5216 if (index->type == STACK_I8) {
5217 index2_reg = alloc_preg (cfg);
5218 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5220 index2_reg = index_reg;
5225 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scale into a single LEA. */
5227 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5228 if (size == 1 || size == 2 || size == 4 || size == 8) {
5229 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5231 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5232 ins->klass = mono_class_get_element_class (klass);
5233 ins->type = STACK_MP;
5239 add_reg = alloc_ireg_mp (cfg);
/* Gsharedvt: multiply by the element size fetched from the RGCTX. */
5242 MonoInst *rgctx_ins;
5245 g_assert (cfg->generic_sharing_context);
5246 context_used = mini_class_check_context_used (cfg, klass);
5247 g_assert (context_used);
5248 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5249 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5251 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* address = array + index*size + offsetof (MonoArray, vector) */
5253 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5254 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5255 ins->klass = mono_class_get_element_class (klass);
5256 ins->type = STACK_MP;
5257 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled where the target implements multiply natively; the 2-dim
 * address computation below relies on real MUL opcodes. */
5262 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit the address of element [i,j] of a rank-2 array with per-dimension
 * lower bounds: both indexes are rebased against the stored lower_bound,
 * range-checked against the stored length, then combined as
 * (realidx1 * length2 + realidx2) * element_size.
 */
5264 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5266 int bounds_reg = alloc_preg (cfg);
5267 int add_reg = alloc_ireg_mp (cfg);
5268 int mult_reg = alloc_preg (cfg);
5269 int mult2_reg = alloc_preg (cfg);
5270 int low1_reg = alloc_preg (cfg);
5271 int low2_reg = alloc_preg (cfg);
5272 int high1_reg = alloc_preg (cfg);
5273 int high2_reg = alloc_preg (cfg);
5274 int realidx1_reg = alloc_preg (cfg);
5275 int realidx2_reg = alloc_preg (cfg);
5276 int sum_reg = alloc_preg (cfg);
5277 int index1, index2, tmpreg;
5281 mono_class_init (klass);
5282 size = mono_class_array_element_size (klass);
5284 index1 = index_ins1->dreg;
5285 index2 = index_ins2->dreg;
5287 #if SIZEOF_REGISTER == 8
5288 /* The array reg is 64 bits but the index reg is only 32 */
5289 if (COMPILE_LLVM (cfg)) {
5292 tmpreg = alloc_preg (cfg);
5293 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5295 tmpreg = alloc_preg (cfg);
5296 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5300 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5304 /* range checking */
5305 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5306 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: rebase index and check 0 <= realidx1 < length1 (unsigned compare). */
5308 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5309 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5310 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5311 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5312 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5313 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5314 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
5316 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5317 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5318 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5319 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5320 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5321 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5322 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector) */
5324 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5325 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5327 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5328 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5330 ins->type = STACK_MP;
5332 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation for the Get/Set/Address array
 * accessor CMETHOD: rank 1 uses the inline fast path, rank 2 uses the
 * intrinsic 2-dim path (when OP_LMUL is available and the element is not
 * gsharedvt-variable), everything else calls the marshalled
 * array-address helper.
 */
5339 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5343 MonoMethod *addr_method;
5345 MonoClass *eclass = cmethod->klass->element_class;
/* A setter takes the value as an extra trailing parameter. */
5347 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5350 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5352 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5353 /* emit_ldelema_2 depends on OP_LMUL */
5354 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5355 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5359 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General case: call the generated Address() helper method. */
5362 element_size = mono_class_array_element_size (eclass);
5363 addr_method = mono_marshal_get_array_address (rank, element_size);
5364 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5369 static MonoBreakPolicy
5370 always_insert_breakpoint (MonoMethod *method)
5372 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5375 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5378 * mono_set_break_policy:
5379 * policy_callback: the new callback function
5381 * Allow embedders to decide whether to actually obey breakpoint instructions
5382 * (both break IL instructions and Debugger.Break () method calls), for example
5383 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5384 * untrusted or semi-trusted code.
5386 * @policy_callback will be called every time a break point instruction needs to
5387 * be inserted with the method argument being the method that calls Debugger.Break()
5388 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5389 * if it wants the breakpoint to not be effective in the given method.
5390 * #MONO_BREAK_POLICY_ALWAYS is the default.
5393 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
5395 if (policy_callback)
5396 break_policy_func = policy_callback;
5398 break_policy_func = always_insert_breakpoint;
/* Query the installed break policy for METHOD; returns whether a breakpoint
 * should actually be emitted. (Name keeps the historical "brekpoint" typo
 * because callers reference it.) */
5402 should_insert_brekpoint (MonoMethod *method) {
5403 switch (break_policy_func (method)) {
5404 case MONO_BREAK_POLICY_ALWAYS:
5406 case MONO_BREAK_POLICY_NEVER:
/* MONO_BREAK_POLICY_ON_DBG relied on mdb, which is no longer supported. */
5408 case MONO_BREAK_POLICY_ON_DBG:
5409 g_warning ("mdb no longer supported");
5412 g_warning ("Incorrect value returned from break policy callback");
5417 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline Array.Get/SetGenericValueImpl: compute the element address and copy
 * the value between the element and the caller-supplied location at
 * args[2]; IS_SET selects the direction. Bounds checking is the caller's
 * responsibility (see comment below).
 */
5419 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5421 MonoInst *addr, *store, *load;
5422 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5424 /* the bounds check is already done by the callers */
5425 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *element = *args[2]; reference stores need a write barrier. */
5427 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5428 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5429 if (mini_type_is_reference (cfg, fsig->params [2]))
5430 emit_write_barrier (cfg, addr, load);
/* get: *args[2] = *element. */
5432 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5433 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True if KLASS is (or instantiates to) a reference type under the current
 * generic sharing configuration. */
5440 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5442 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp[2] into element sp[1] of array sp[0] with element
 * class KLASS. Reference-type stores with SAFETY_CHECKS go through the
 * virtual stelemref helper (which performs the array covariance check);
 * otherwise the address is computed inline, with a constant-index fast path.
 */
5446 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference needs the covariance-checking helper. */
5448 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5449 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5450 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5451 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5452 MonoInst *iargs [3];
5455 mono_class_setup_vtable (obj_array);
5456 g_assert (helper->slot);
5458 if (sp [0]->type != STACK_OBJ)
5460 if (sp [2]->type != STACK_OBJ)
5467 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Gsharedvt element: element size unknown at JIT time, use STOREV. */
5471 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5474 // FIXME-VT: OP_ICONST optimization
5475 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5476 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5477 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset into the store itself. */
5478 } else if (sp [1]->opcode == OP_ICONST) {
5479 int array_reg = sp [0]->dreg;
5480 int index_reg = sp [1]->dreg;
5481 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5484 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5485 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the address, store, and barrier reference writes. */
5487 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5488 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5489 if (generic_class_is_reference_type (cfg, klass))
5490 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline the Array.UnsafeLoad/UnsafeStore intrinsics: no bounds check is
 * emitted in either direction. The element class comes from params[2]
 * (store) or the return type (load).
 */
5497 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5502 eklass = mono_class_from_mono_type (fsig->params [2]);
5504 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety checks disabled. */
5507 return emit_array_store (cfg, eklass, args, FALSE);
5509 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5510 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether a value of PARAM_KLASS may be bitwise-reinterpreted as
 * RETURN_KLASS for the Array.UnsafeMov intrinsic: both must be valuetypes,
 * contain no managed references, not mix struct with primitive/enum, not
 * involve R4/R8 (floats live in different registers), and have identical
 * value sizes.
 * Fix: the '&' of "&param_klass" had been mis-encoded as the pilcrow
 * entity ("&para;" -> '¶'), corrupting three MONO_TYPE_ISSTRUCT operands;
 * restored to take the address of param_klass->byval_arg as on the
 * parallel &return_klass->byval_arg operands.
 */
5516 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5520 //Only allow for valuetypes
5521 if (!param_klass->valuetype || !return_klass->valuetype)
/* No GC references: a raw bit copy must not hide pointers from the GC. */
5525 if (param_klass->has_references || return_klass->has_references)
5528 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5529 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5530 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floating point values use different registers/conventions. */
5533 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5534 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5537 //And have the same size
5538 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 * Inline Array.UnsafeMov when the parameter and return types are
 * bit-compatible valuetypes (or rank-1 arrays of such); otherwise fall
 * through (not visible here) without emitting an intrinsic.
 */
5544 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5546 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5547 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5549 //Valuetypes that are semantically equivalent
5550 if (is_unsafe_mov_compatible (param_klass, return_klass))
5553 //Arrays of valuetypes that are semantically equivalent
5554 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with inline IR. SIMD ctor
 * intrinsics are attempted first (when MONO_OPT_SIMD is enabled and the arch
 * supports them), then native-types intrinsics.
 */
5561 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5563 #ifdef MONO_ARCH_SIMD_INTRINSICS
5564 MonoInst *ins = NULL;
5566 if (cfg->opt & MONO_OPT_SIMD) {
5567 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to NativeTypes (nint/nfloat etc.) intrinsics. */
5573 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction to the current basic block.
 * KIND is one of the MONO_MEMORY_BARRIER_* constants (ACQ/REL/SEQ) and is
 * stashed in the instruction's backend data for the arch lowering pass.
 */
5577 emit_memory_barrier (MonoCompile *cfg, int kind)
5579 MonoInst *ins = NULL;
5580 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5581 MONO_ADD_INS (cfg->cbb, ins);
5582 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics that are only emitted when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs (lowered to single FP opcodes) and, when CMOV
 * is enabled, branchless integer Min/Max. Returns the emitted instruction or
 * NULL if CMETHOD is not handled here.
 */
5588 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5590 MonoInst *ins = NULL;
5593 /* The LLVM backend supports these intrinsics */
5594 if (cmethod->klass == mono_defaults.math_class) {
5595 if (strcmp (cmethod->name, "Sin") == 0) {
5597 } else if (strcmp (cmethod->name, "Cos") == 0) {
5599 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload. */
5601 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* All of the above are unary double -> double operations. */
5605 if (opcode && fsig->param_count == 1) {
5606 MONO_INST_NEW (cfg, ins, opcode);
5607 ins->type = STACK_R8;
5608 ins->dreg = mono_alloc_freg (cfg);
5609 ins->sreg1 = args [0]->dreg;
5610 MONO_ADD_INS (cfg->cbb, ins);
/* Branchless Min/Max need conditional moves; pick signed/unsigned and
 * 32/64-bit variants from the first parameter's type. */
5614 if (cfg->opt & MONO_OPT_CMOV) {
5615 if (strcmp (cmethod->name, "Min") == 0) {
5616 if (fsig->params [0]->type == MONO_TYPE_I4)
5618 if (fsig->params [0]->type == MONO_TYPE_U4)
5619 opcode = OP_IMIN_UN;
5620 else if (fsig->params [0]->type == MONO_TYPE_I8)
5622 else if (fsig->params [0]->type == MONO_TYPE_U8)
5623 opcode = OP_LMIN_UN;
5624 } else if (strcmp (cmethod->name, "Max") == 0) {
5625 if (fsig->params [0]->type == MONO_TYPE_I4)
5627 if (fsig->params [0]->type == MONO_TYPE_U4)
5628 opcode = OP_IMAX_UN;
5629 else if (fsig->params [0]->type == MONO_TYPE_I8)
5631 else if (fsig->params [0]->type == MONO_TYPE_U8)
5632 opcode = OP_LMAX_UN;
/* Min/Max are binary; stack type follows the operand width. */
5636 if (opcode && fsig->param_count == 2) {
5637 MONO_INST_NEW (cfg, ins, opcode);
5638 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5639 ins->dreg = mono_alloc_ireg (cfg);
5640 ins->sreg1 = args [0]->dreg;
5641 ins->sreg2 = args [1]->dreg;
5642 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use in shared (generic-sharing) code:
 * the System.Array Unsafe* helpers. Dispatches on the method name.
 */
5650 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5652 if (cmethod->klass == mono_defaults.array_class) {
5653 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5654 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5655 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5656 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5657 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5658 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   The main JIT intrinsics dispatcher: recognize well-known BCL methods
 * (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 * Volatile, Debugger, Environment, Math, ObjCRuntime.Selector) and emit inline
 * IR instead of a call. Falls through to SIMD, native-types, LLVM-specific and
 * finally arch-specific intrinsics. NOTE(review): this view is missing interior
 * lines (returns, #else/#endif, braces); comments below describe only what the
 * visible lines establish.
 */
5665 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5667 MonoInst *ins = NULL;
/* Lazily cache the RuntimeHelpers class; resolved once per process. */
5669 static MonoClass *runtime_helpers_class = NULL;
5670 if (! runtime_helpers_class)
5671 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5672 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
5674 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds-checked inline load of a UTF-16 unit. */
5675 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5676 int dreg = alloc_ireg (cfg);
5677 int index_reg = alloc_preg (cfg);
5678 int add_reg = alloc_preg (cfg);
5680 #if SIZEOF_REGISTER == 8
5681 /* The array reg is 64 bits but the index reg is only 32 */
5682 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5684 index_reg = args [1]->dreg;
5686 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5688 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86: LEA folds base + index*2 + chars offset in one instruction. */
5689 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5690 add_reg = ins->dreg;
5691 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Other archs: compute index*2 then add the string base. */
5694 int mult_reg = alloc_preg (cfg);
5695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5696 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5697 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5698 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5700 type_from_op (cfg, ins, NULL, NULL);
/* String.get_Length: decomposed later (OP_STRLEN) so ABC removal can see it. */
5702 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5703 int dreg = alloc_ireg (cfg);
5704 /* Decompose later to allow more optimizations */
5705 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5706 ins->type = STACK_I4;
/* May fault on a null string. */
5707 ins->flags |= MONO_INST_FAULT;
5708 cfg->cbb->has_array_access = TRUE;
5709 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object intrinsics --- */
5714 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: load vtable, then its MonoType field. */
5716 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5717 int dreg = alloc_ireg_ref (cfg);
5718 int vt_reg = alloc_preg (cfg);
5719 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5720 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5721 type_from_op (cfg, ins, NULL, NULL);
5724 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* InternalGetHashCode: address-based hash; only valid with a non-moving GC. */
5725 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5726 int dreg = alloc_ireg (cfg);
5727 int t1 = alloc_ireg (cfg);
/* (obj << 3) * 2654435761 — Knuth multiplicative hashing of the address. */
5729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5730 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5731 ins->type = STACK_I4;
/* Object..ctor is empty — replace the call with a NOP. */
5735 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5736 MONO_INST_NEW (cfg, ins, OP_NOP);
5737 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5741 } else if (cmethod->klass == mono_defaults.array_class) {
/* Not supported under gsharedvt. */
5742 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5743 return emit_array_generic_access (cfg, fsig, args, FALSE);
5744 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5745 return emit_array_generic_access (cfg, fsig, args, TRUE);
5747 #ifndef MONO_BIG_ARRAYS
5749 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
/* Only when the dimension argument is the constant 0. */
5752 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5753 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5754 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5755 int dreg = alloc_ireg (cfg);
5756 int bounds_reg = alloc_ireg_mp (cfg);
5757 MonoBasicBlock *end_bb, *szarray_bb;
5758 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5760 NEW_BBLOCK (cfg, end_bb);
5761 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer marks a single-dimension zero-based (sz)array. */
5763 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5764 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5767 /* Non-szarray case */
5769 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5770 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5772 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5773 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5775 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is always 0. */
5778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5779 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5781 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5782 MONO_START_BB (cfg, end_bb);
/* Merge the two paths into one result register. */
5784 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5785 ins->type = STACK_I4;
/* Cheap prefilter: remaining handled names all start with 'g'. */
5791 if (cmethod->name [0] != 'g')
/* Array.get_Rank: read the rank byte out of the vtable. */
5794 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5795 int dreg = alloc_ireg (cfg);
5796 int vtable_reg = alloc_preg (cfg);
5797 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5798 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5799 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5800 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5801 type_from_op (cfg, ins, NULL, NULL);
/* Array.get_Length: load max_length directly. */
5804 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5805 int dreg = alloc_ireg (cfg);
5807 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5808 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5809 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5814 } else if (cmethod->klass == runtime_helpers_class) {
/* OffsetToStringData is a compile-time constant. */
5816 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5817 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5821 } else if (cmethod->klass == mono_defaults.thread_class) {
5822 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
/* OP_RELAXED_NOP lowers to a cpu pause/yield hint. */
5823 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5824 MONO_ADD_INS (cfg->cbb, ins);
5826 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5827 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load followed by an acquire barrier. */
5828 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5830 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5832 if (fsig->params [0]->type == MONO_TYPE_I1)
5833 opcode = OP_LOADI1_MEMBASE;
5834 else if (fsig->params [0]->type == MONO_TYPE_U1)
5835 opcode = OP_LOADU1_MEMBASE;
5836 else if (fsig->params [0]->type == MONO_TYPE_I2)
5837 opcode = OP_LOADI2_MEMBASE;
5838 else if (fsig->params [0]->type == MONO_TYPE_U2)
5839 opcode = OP_LOADU2_MEMBASE;
5840 else if (fsig->params [0]->type == MONO_TYPE_I4)
5841 opcode = OP_LOADI4_MEMBASE;
5842 else if (fsig->params [0]->type == MONO_TYPE_U4)
5843 opcode = OP_LOADU4_MEMBASE;
5844 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5845 opcode = OP_LOADI8_MEMBASE;
5846 else if (fsig->params [0]->type == MONO_TYPE_R4)
5847 opcode = OP_LOADR4_MEMBASE;
5848 else if (fsig->params [0]->type == MONO_TYPE_R8)
5849 opcode = OP_LOADR8_MEMBASE;
5850 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5851 opcode = OP_LOAD_MEMBASE;
5854 MONO_INST_NEW (cfg, ins, opcode);
5855 ins->inst_basereg = args [0]->dreg;
5856 ins->inst_offset = 0;
5857 MONO_ADD_INS (cfg->cbb, ins);
/* Pick destination register class / stack type from the parameter type. */
5859 switch (fsig->params [0]->type) {
5866 ins->dreg = mono_alloc_ireg (cfg);
5867 ins->type = STACK_I4;
5871 ins->dreg = mono_alloc_lreg (cfg);
5872 ins->type = STACK_I8;
5876 ins->dreg = mono_alloc_ireg (cfg);
5877 #if SIZEOF_REGISTER == 8
5878 ins->type = STACK_I8;
5880 ins->type = STACK_I4;
5885 ins->dreg = mono_alloc_freg (cfg);
5886 ins->type = STACK_R8;
5889 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5890 ins->dreg = mono_alloc_ireg_ref (cfg);
5891 ins->type = STACK_OBJ;
/* 64-bit loads need decomposition on 32-bit targets. */
5895 if (opcode == OP_LOADI8_MEMBASE)
5896 ins = mono_decompose_opcode (cfg, ins, NULL);
5898 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier before the store. */
5902 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5904 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5906 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5907 opcode = OP_STOREI1_MEMBASE_REG;
5908 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5909 opcode = OP_STOREI2_MEMBASE_REG;
5910 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5911 opcode = OP_STOREI4_MEMBASE_REG;
5912 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5913 opcode = OP_STOREI8_MEMBASE_REG;
5914 else if (fsig->params [0]->type == MONO_TYPE_R4)
5915 opcode = OP_STORER4_MEMBASE_REG;
5916 else if (fsig->params [0]->type == MONO_TYPE_R8)
5917 opcode = OP_STORER8_MEMBASE_REG;
5918 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5919 opcode = OP_STORE_MEMBASE_REG;
5922 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5924 MONO_INST_NEW (cfg, ins, opcode);
5925 ins->sreg1 = args [1]->dreg;
5926 ins->inst_destbasereg = args [0]->dreg;
5927 ins->inst_offset = 0;
5928 MONO_ADD_INS (cfg->cbb, ins);
5930 if (opcode == OP_STOREI8_MEMBASE_REG)
5931 ins = mono_decompose_opcode (cfg, ins, NULL);
/* --- System.Threading.Monitor fastpaths (object passed in a fixed reg) --- */
5936 } else if (cmethod->klass == mono_defaults.monitor_class) {
5937 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5938 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5941 if (COMPILE_LLVM (cfg)) {
5943 * Pass the argument normally, the LLVM backend will handle the
5944 * calling convention problems.
5946 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the trampoline expects the object in a specific register. */
5948 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5949 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5950 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5951 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5954 return (MonoInst*)call;
5955 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(object, ref bool lockTaken) — the v4 variant. */
5956 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5959 if (COMPILE_LLVM (cfg)) {
5961 * Pass the argument normally, the LLVM backend will handle the
5962 * calling convention problems.
5964 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5966 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5967 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5968 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5969 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5972 return (MonoInst*)call;
5974 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5977 if (COMPILE_LLVM (cfg)) {
5978 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5980 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5981 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5982 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5983 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5986 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
5989 } else if (cmethod->klass->image == mono_defaults.corlib &&
5990 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5991 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5994 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): sequentially-consistent atomic 64-bit load. */
5995 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5996 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5997 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5998 ins->dreg = mono_alloc_preg (cfg);
5999 ins->sreg1 = args [0]->dreg;
6000 ins->type = STACK_I8;
6001 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6002 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: full barriers around a plain load. */
6006 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6008 /* 64 bit reads are already atomic */
6009 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6010 load_ins->dreg = mono_alloc_preg (cfg);
6011 load_ins->inst_basereg = args [0]->dreg;
6012 load_ins->inst_offset = 0;
6013 load_ins->type = STACK_I8;
6014 MONO_ADD_INS (cfg->cbb, load_ins);
6016 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1. */
6023 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6024 MonoInst *ins_iconst;
6027 if (fsig->params [0]->type == MONO_TYPE_I4) {
6028 opcode = OP_ATOMIC_ADD_I4;
6029 cfg->has_atomic_add_i4 = TRUE;
6031 #if SIZEOF_REGISTER == 8
6032 else if (fsig->params [0]->type == MONO_TYPE_I8)
6033 opcode = OP_ATOMIC_ADD_I8;
6036 if (!mono_arch_opcode_supported (opcode))
6038 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6039 ins_iconst->inst_c0 = 1;
6040 ins_iconst->dreg = mono_alloc_ireg (cfg);
6041 MONO_ADD_INS (cfg->cbb, ins_iconst);
6043 MONO_INST_NEW (cfg, ins, opcode);
6044 ins->dreg = mono_alloc_ireg (cfg);
6045 ins->inst_basereg = args [0]->dreg;
6046 ins->inst_offset = 0;
6047 ins->sreg2 = ins_iconst->dreg;
6048 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6049 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1. */
6051 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6052 MonoInst *ins_iconst;
6055 if (fsig->params [0]->type == MONO_TYPE_I4) {
6056 opcode = OP_ATOMIC_ADD_I4;
6057 cfg->has_atomic_add_i4 = TRUE;
6059 #if SIZEOF_REGISTER == 8
6060 else if (fsig->params [0]->type == MONO_TYPE_I8)
6061 opcode = OP_ATOMIC_ADD_I8;
6064 if (!mono_arch_opcode_supported (opcode))
6066 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6067 ins_iconst->inst_c0 = -1;
6068 ins_iconst->dreg = mono_alloc_ireg (cfg);
6069 MONO_ADD_INS (cfg->cbb, ins_iconst);
6071 MONO_INST_NEW (cfg, ins, opcode);
6072 ins->dreg = mono_alloc_ireg (cfg);
6073 ins->inst_basereg = args [0]->dreg;
6074 ins->inst_offset = 0;
6075 ins->sreg2 = ins_iconst->dreg;
6076 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6077 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of a caller-supplied value. */
6079 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6082 if (fsig->params [0]->type == MONO_TYPE_I4) {
6083 opcode = OP_ATOMIC_ADD_I4;
6084 cfg->has_atomic_add_i4 = TRUE;
6086 #if SIZEOF_REGISTER == 8
6087 else if (fsig->params [0]->type == MONO_TYPE_I8)
6088 opcode = OP_ATOMIC_ADD_I8;
6091 if (!mono_arch_opcode_supported (opcode))
6093 MONO_INST_NEW (cfg, ins, opcode);
6094 ins->dreg = mono_alloc_ireg (cfg);
6095 ins->inst_basereg = args [0]->dreg;
6096 ins->inst_offset = 0;
6097 ins->sreg2 = args [1]->dreg;
6098 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6099 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic swap; floats are moved through integer regs (f2i/i2f). */
6102 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6103 MonoInst *f2i = NULL, *i2f;
6104 guint32 opcode, f2i_opcode, i2f_opcode;
6105 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6106 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6108 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6109 fsig->params [0]->type == MONO_TYPE_R4) {
6110 opcode = OP_ATOMIC_EXCHANGE_I4;
6111 f2i_opcode = OP_MOVE_F_TO_I4;
6112 i2f_opcode = OP_MOVE_I4_TO_F;
6113 cfg->has_atomic_exchange_i4 = TRUE;
6115 #if SIZEOF_REGISTER == 8
6117 fsig->params [0]->type == MONO_TYPE_I8 ||
6118 fsig->params [0]->type == MONO_TYPE_R8 ||
6119 fsig->params [0]->type == MONO_TYPE_I) {
6120 opcode = OP_ATOMIC_EXCHANGE_I8;
6121 f2i_opcode = OP_MOVE_F_TO_I8;
6122 i2f_opcode = OP_MOVE_I8_TO_F;
6125 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6126 opcode = OP_ATOMIC_EXCHANGE_I4;
6127 cfg->has_atomic_exchange_i4 = TRUE;
6133 if (!mono_arch_opcode_supported (opcode))
6137 /* TODO: Decompose these opcodes instead of bailing here. */
6138 if (COMPILE_SOFT_FLOAT (cfg))
/* Move the float value into an int register before the atomic op. */
6141 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6142 f2i->dreg = mono_alloc_ireg (cfg);
6143 f2i->sreg1 = args [1]->dreg;
6144 if (f2i_opcode == OP_MOVE_F_TO_I4)
6145 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6146 MONO_ADD_INS (cfg->cbb, f2i);
6149 MONO_INST_NEW (cfg, ins, opcode);
6150 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6151 ins->inst_basereg = args [0]->dreg;
6152 ins->inst_offset = 0;
6153 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6154 MONO_ADD_INS (cfg->cbb, ins);
6156 switch (fsig->params [0]->type) {
6158 ins->type = STACK_I4;
6161 ins->type = STACK_I8;
6164 #if SIZEOF_REGISTER == 8
6165 ins->type = STACK_I8;
6167 ins->type = STACK_I4;
6172 ins->type = STACK_R8;
6175 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6176 ins->type = STACK_OBJ;
/* Move the old value back to a float register. */
6181 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6182 i2f->dreg = mono_alloc_freg (cfg);
6183 i2f->sreg1 = ins->dreg;
6184 i2f->type = STACK_R8;
6185 if (i2f_opcode == OP_MOVE_I4_TO_F)
6186 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6187 MONO_ADD_INS (cfg->cbb, i2f);
/* A reference was written through args [0]; keep the GC informed. */
6192 if (cfg->gen_write_barriers && is_ref)
6193 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(location, value, comparand): atomic CAS. */
6195 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6196 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6197 guint32 opcode, f2i_opcode, i2f_opcode;
6198 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6199 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6201 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6202 fsig->params [1]->type == MONO_TYPE_R4) {
6203 opcode = OP_ATOMIC_CAS_I4;
6204 f2i_opcode = OP_MOVE_F_TO_I4;
6205 i2f_opcode = OP_MOVE_I4_TO_F;
6206 cfg->has_atomic_cas_i4 = TRUE;
6208 #if SIZEOF_REGISTER == 8
6210 fsig->params [1]->type == MONO_TYPE_I8 ||
6211 fsig->params [1]->type == MONO_TYPE_R8 ||
6212 fsig->params [1]->type == MONO_TYPE_I) {
6213 opcode = OP_ATOMIC_CAS_I8;
6214 f2i_opcode = OP_MOVE_F_TO_I8;
6215 i2f_opcode = OP_MOVE_I8_TO_F;
6218 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6219 opcode = OP_ATOMIC_CAS_I4;
6220 cfg->has_atomic_cas_i4 = TRUE;
6226 if (!mono_arch_opcode_supported (opcode))
6230 /* TODO: Decompose these opcodes instead of bailing here. */
6231 if (COMPILE_SOFT_FLOAT (cfg))
/* Float operands: move both 'new' and 'comparand' through int regs. */
6234 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6235 f2i_new->dreg = mono_alloc_ireg (cfg);
6236 f2i_new->sreg1 = args [1]->dreg;
6237 if (f2i_opcode == OP_MOVE_F_TO_I4)
6238 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6239 MONO_ADD_INS (cfg->cbb, f2i_new);
6241 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6242 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6243 f2i_cmp->sreg1 = args [2]->dreg;
6244 if (f2i_opcode == OP_MOVE_F_TO_I4)
6245 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6246 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6249 MONO_INST_NEW (cfg, ins, opcode);
6250 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6251 ins->sreg1 = args [0]->dreg;
6252 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6253 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6254 MONO_ADD_INS (cfg->cbb, ins);
6256 switch (fsig->params [1]->type) {
6258 ins->type = STACK_I4;
6261 ins->type = STACK_I8;
6264 #if SIZEOF_REGISTER == 8
6265 ins->type = STACK_I8;
6267 ins->type = STACK_I4;
6272 ins->type = STACK_R8;
6275 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6276 ins->type = STACK_OBJ;
6281 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6282 i2f->dreg = mono_alloc_freg (cfg);
6283 i2f->sreg1 = ins->dreg;
6284 i2f->type = STACK_R8;
6285 if (i2f_opcode == OP_MOVE_I4_TO_F)
6286 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6287 MONO_ADD_INS (cfg->cbb, i2f);
6292 if (cfg->gen_write_barriers && is_ref)
6293 emit_write_barrier (cfg, args [0], args [1]);
/* 4-arg CompareExchange: CAS plus an out-param success flag. */
6295 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6296 fsig->params [1]->type == MONO_TYPE_I4) {
6297 MonoInst *cmp, *ceq;
6299 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6302 /* int32 r = CAS (location, value, comparand); */
6303 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6304 ins->dreg = alloc_ireg (cfg);
6305 ins->sreg1 = args [0]->dreg;
6306 ins->sreg2 = args [1]->dreg;
6307 ins->sreg3 = args [2]->dreg;
6308 ins->type = STACK_I4;
6309 MONO_ADD_INS (cfg->cbb, ins);
6311 /* bool result = r == comparand; */
6312 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6313 cmp->sreg1 = ins->dreg;
6314 cmp->sreg2 = args [2]->dreg;
6315 cmp->type = STACK_I4;
6316 MONO_ADD_INS (cfg->cbb, cmp);
6318 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6319 ceq->dreg = alloc_ireg (cfg);
6320 ceq->type = STACK_I4;
6321 MONO_ADD_INS (cfg->cbb, ceq);
6323 /* *success = result; */
6324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6326 cfg->has_atomic_cas_i4 = TRUE;
6328 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6329 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6333 } else if (cmethod->klass->image == mono_defaults.corlib &&
6334 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6335 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6338 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6340 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6341 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6343 if (fsig->params [0]->type == MONO_TYPE_I1)
6344 opcode = OP_ATOMIC_LOAD_I1;
6345 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6346 opcode = OP_ATOMIC_LOAD_U1;
6347 else if (fsig->params [0]->type == MONO_TYPE_I2)
6348 opcode = OP_ATOMIC_LOAD_I2;
6349 else if (fsig->params [0]->type == MONO_TYPE_U2)
6350 opcode = OP_ATOMIC_LOAD_U2;
6351 else if (fsig->params [0]->type == MONO_TYPE_I4)
6352 opcode = OP_ATOMIC_LOAD_I4;
6353 else if (fsig->params [0]->type == MONO_TYPE_U4)
6354 opcode = OP_ATOMIC_LOAD_U4;
6355 else if (fsig->params [0]->type == MONO_TYPE_R4)
6356 opcode = OP_ATOMIC_LOAD_R4;
6357 else if (fsig->params [0]->type == MONO_TYPE_R8)
6358 opcode = OP_ATOMIC_LOAD_R8;
6359 #if SIZEOF_REGISTER == 8
6360 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6361 opcode = OP_ATOMIC_LOAD_I8;
6362 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6363 opcode = OP_ATOMIC_LOAD_U8;
6365 else if (fsig->params [0]->type == MONO_TYPE_I)
6366 opcode = OP_ATOMIC_LOAD_I4;
6367 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6368 opcode = OP_ATOMIC_LOAD_U4;
6372 if (!mono_arch_opcode_supported (opcode))
6375 MONO_INST_NEW (cfg, ins, opcode);
6376 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6377 ins->sreg1 = args [0]->dreg;
/* Acquire semantics: later accesses cannot move before this load. */
6378 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6379 MONO_ADD_INS (cfg->cbb, ins);
6381 switch (fsig->params [0]->type) {
6382 case MONO_TYPE_BOOLEAN:
6389 ins->type = STACK_I4;
6393 ins->type = STACK_I8;
6397 #if SIZEOF_REGISTER == 8
6398 ins->type = STACK_I8;
6400 ins->type = STACK_I4;
6405 ins->type = STACK_R8;
6408 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6409 ins->type = STACK_OBJ;
6415 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6417 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6419 if (fsig->params [0]->type == MONO_TYPE_I1)
6420 opcode = OP_ATOMIC_STORE_I1;
6421 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6422 opcode = OP_ATOMIC_STORE_U1;
6423 else if (fsig->params [0]->type == MONO_TYPE_I2)
6424 opcode = OP_ATOMIC_STORE_I2;
6425 else if (fsig->params [0]->type == MONO_TYPE_U2)
6426 opcode = OP_ATOMIC_STORE_U2;
6427 else if (fsig->params [0]->type == MONO_TYPE_I4)
6428 opcode = OP_ATOMIC_STORE_I4;
6429 else if (fsig->params [0]->type == MONO_TYPE_U4)
6430 opcode = OP_ATOMIC_STORE_U4;
6431 else if (fsig->params [0]->type == MONO_TYPE_R4)
6432 opcode = OP_ATOMIC_STORE_R4;
6433 else if (fsig->params [0]->type == MONO_TYPE_R8)
6434 opcode = OP_ATOMIC_STORE_R8;
6435 #if SIZEOF_REGISTER == 8
6436 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6437 opcode = OP_ATOMIC_STORE_I8;
6438 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6439 opcode = OP_ATOMIC_STORE_U8;
6441 else if (fsig->params [0]->type == MONO_TYPE_I)
6442 opcode = OP_ATOMIC_STORE_I4;
6443 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6444 opcode = OP_ATOMIC_STORE_U4;
6448 if (!mono_arch_opcode_supported (opcode))
6451 MONO_INST_NEW (cfg, ins, opcode);
6452 ins->dreg = args [0]->dreg;
6453 ins->sreg1 = args [1]->dreg;
/* Release semantics: earlier accesses cannot move past this store. */
6454 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6455 MONO_ADD_INS (cfg->cbb, ins);
6457 if (cfg->gen_write_barriers && is_ref)
6458 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger --- */
6464 } else if (cmethod->klass->image == mono_defaults.corlib &&
6465 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6466 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6467 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* 'brekpoint' typo is in the callee's actual identifier — do not "fix" here. */
6468 if (should_insert_brekpoint (cfg->method)) {
6469 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6471 MONO_INST_NEW (cfg, ins, OP_NOP);
6472 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment: platform check folded to a constant --- */
6476 } else if (cmethod->klass->image == mono_defaults.corlib &&
6477 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6478 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6479 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6481 EMIT_NEW_ICONST (cfg, ins, 1);
6483 EMIT_NEW_ICONST (cfg, ins, 0);
6486 } else if (cmethod->klass == mono_defaults.math_class) {
6488 * There is general branchless code for Min/Max, but it does not work for
6490 * http://everything2.com/?node_id=1051618
/* --- ObjCRuntime.Selector.GetHandle (MonoMac / monotouch / Xamarin.iOS) --- */
6492 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6493 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6494 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6495 !strcmp (cmethod->klass->name, "Selector")) ||
6496 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6497 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6498 !strcmp (cmethod->klass->name, "Selector"))
6500 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* Only when the argument is a string literal (ldstr patch-info constant). */
6501 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6502 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6505 MonoJumpInfoToken *ji;
6508 cfg->disable_llvm = TRUE;
6510 if (args [0]->opcode == OP_GOT_ENTRY) {
6511 pi = args [0]->inst_p1;
6512 g_assert (pi->opcode == OP_PATCH_INFO);
6513 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6516 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6517 ji = args [0]->inst_p0;
/* The ldstr argument is consumed here; kill the original instruction. */
6520 NULLIFY_INS (args [0]);
6523 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6524 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6525 ins->dreg = mono_alloc_ireg (cfg);
6527 ins->inst_p0 = mono_string_to_utf8 (s);
6528 MONO_ADD_INS (cfg->cbb, ins);
/* --- Generic fallbacks: SIMD, native types, LLVM, arch-specific --- */
6534 #ifdef MONO_ARCH_SIMD_INTRINSICS
6535 if (cfg->opt & MONO_OPT_SIMD) {
6536 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6542 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6546 if (COMPILE_LLVM (cfg)) {
6547 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6552 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6556 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to a faster equivalent. Currently only handles
 * String::InternalAllocateStr, which is rerouted to the GC's managed string
 * allocator when profiling of allocations is off and the code is not compiled
 * with MONO_OPT_SHARED.
 */
6559 inline static MonoInst*
6560 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6561 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6563 if (method->klass == mono_defaults.string_class) {
6564 /* managed string allocation support */
/* Allocation profiling must see real allocations, so bail out when active. */
6565 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6566 MonoInst *iargs [2];
6567 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6568 MonoMethod *managed_alloc = NULL;
6570 g_assert (vtable); /*Should not fail since it System.String*/
6571 #ifndef MONO_CROSS_COMPILE
6572 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6576 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6577 iargs [1] = args [0];
6578 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * Spill the call arguments in SP into freshly created local variables and
 * record them in cfg->args, so the inlined body can refer to them as args.
 */
6585 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6587 MonoInst *store, *temp;
6590 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' argument the static type comes from the stack entry. */
6591 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6594 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6595 * would be different than the MonoInst's used to represent arguments, and
6596 * the ldelema implementation can't deal with that.
6597 * Solution: When ldelema is used on an inline argument, create a var for
6598 * it, emit ldelema on that var, and emit the saving code below in
6599 * inline_method () if needed.
6601 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6602 cfg->args [i] = temp;
6603 /* This uses cfg->args [i] which is set by the preceding line */
6604 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6605 store->cil_code = sp [0]->cil_code;
6610 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6611 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug aid: only allow inlining of callees whose full name matches the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * The env var is read once and cached in a function-local static.
 */
6615 check_inline_called_method_name_limit (MonoMethod *called_method)
6618 static const char *limit = NULL;
6620 if (limit == NULL) {
6621 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6623 if (limit_string != NULL)
6624 limit = limit_string;
6629 if (limit [0] != '\0') {
6630 char *called_method_name = mono_method_full_name (called_method, TRUE);
6632 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6633 g_free (called_method_name);
6635 //return (strncmp_result <= 0);
6636 return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug aid: mirror of check_inline_called_method_name_limit, but filters on
 * the CALLER's full name via MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
6645 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6648 static const char *limit = NULL;
6650 if (limit == NULL) {
6651 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6652 if (limit_string != NULL) {
6653 limit = limit_string;
6659 if (limit [0] != '\0') {
6660 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6662 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6663 g_free (caller_method_name);
6665 //return (strncmp_result <= 0);
6666 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 * Emit IR that initializes the register DREG to the zero/default value for
 * RTYPE: NULL for references, 0 for integers, 0.0 for floats (loaded from
 * static constants), and VZERO for value types.
 */
6674 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6676 static double r8_0 = 0.0;
6677 static float r4_0 = 0.0;
6681 rtype = mini_get_underlying_type (cfg, rtype);
6685 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6686 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6687 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6688 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6689 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6690 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp: keep R4 values in single precision instead of widening to R8. */
6691 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6692 ins->type = STACK_R4;
6693 ins->inst_p0 = (void*)&r4_0;
6695 MONO_ADD_INS (cfg->cbb, ins);
6696 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6697 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6698 ins->type = STACK_R8;
6699 ins->inst_p0 = (void*)&r8_0;
6701 MONO_ADD_INS (cfg->cbb, ins);
6702 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6703 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6704 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6705 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
/* Generic type variables known to be value types are zeroed like VTs. */
6706 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6708 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 * Like emit_init_rvar, but emits OP_DUMMY_* placeholder instructions that
 * keep the IR valid without generating real initialization code; falls back
 * to emit_init_rvar for types with no dummy opcode.
 */
6713 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6717 rtype = mini_get_underlying_type (cfg, rtype);
6721 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6722 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6723 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6724 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6725 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6726 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6727 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6728 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6729 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6730 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6731 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6732 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6733 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6734 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization. */
6736 emit_init_rvar (cfg, dreg, rtype);
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 * Initialize local variable LOCAL of type TYPE. With soft-float a fresh
 * register is initialized and stored to the local; otherwise the local's
 * dreg is initialized directly (real init or dummy init depending on INIT).
 */
6742 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6744 MonoInst *var = cfg->locals [local];
6745 if (COMPILE_SOFT_FLOAT (cfg)) {
6747 int reg = alloc_dreg (cfg, var->type);
6748 emit_init_rvar (cfg, reg, type);
6749 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6752 emit_init_rvar (cfg, var->dreg, type);
6754 emit_dummy_init_rvar (cfg, var->dreg, type);
 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 * Try to inline CMETHOD at the current IL position. Saves the parts of
 * cfg that mono_method_to_ir clobbers, compiles the callee's IL into fresh
 * bblocks between SBBLOCK and EBBLOCK, then either commits (merging the new
 * blocks into the CFG) or aborts and restores the previous state.
 * Returns the inline cost (> 0) on success, 0 on failure.
 * NOTE(review): listing is elided; some lines (returns, braces) are not visible here.
 */
6764 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6765 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6767 MonoInst *ins, *rvar = NULL;
6768 MonoMethodHeader *cheader;
6769 MonoBasicBlock *ebblock, *sbblock;
6771 MonoMethod *prev_inlined_method;
6772 MonoInst **prev_locals, **prev_args;
6773 MonoType **prev_arg_types;
6774 guint prev_real_offset;
6775 GHashTable *prev_cbb_hash;
6776 MonoBasicBlock **prev_cil_offset_to_bb;
6777 MonoBasicBlock *prev_cbb;
6778 unsigned char* prev_cil_start;
6779 guint32 prev_cil_offset_to_bb_len;
6780 MonoMethod *prev_current_method;
6781 MonoGenericContext *prev_generic_context;
6782 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6784 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based debugging filters on callee/caller names. */
6786 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6787 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6790 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6791 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6796 fsig = mono_method_signature (cmethod);
6798 if (cfg->verbose_level > 2)
6799 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6801 if (!cmethod->inline_info) {
6802 cfg->stat_inlineable_methods++;
6803 cmethod->inline_info = 1;
6806 /* allocate local variables */
6807 cheader = mono_method_get_header (cmethod);
6809 if (cheader == NULL || mono_loader_get_last_error ()) {
6810 MonoLoaderError *error = mono_loader_get_last_error ();
6813 mono_metadata_free_mh (cheader);
6814 if (inline_always && error)
6815 mono_cfg_set_exception (cfg, error->exception_type);
6817 mono_loader_clear_error ();
6821 /*Must verify before creating locals as it can cause the JIT to assert.*/
6822 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6823 mono_metadata_free_mh (cheader);
6827 /* allocate space to store the return value */
6828 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6829 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
6832 prev_locals = cfg->locals;
6833 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6834 for (i = 0; i < cheader->num_locals; ++i)
6835 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6837 /* allocate start and end blocks */
6838 /* This is needed so if the inline is aborted, we can clean up */
6839 NEW_BBLOCK (cfg, sbblock);
6840 sbblock->real_offset = real_offset;
6842 NEW_BBLOCK (cfg, ebblock);
6843 ebblock->block_num = cfg->num_bblocks++;
6844 ebblock->real_offset = real_offset;
/* Save the per-method compile state that mono_method_to_ir will clobber. */
6846 prev_args = cfg->args;
6847 prev_arg_types = cfg->arg_types;
6848 prev_inlined_method = cfg->inlined_method;
6849 cfg->inlined_method = cmethod;
6850 cfg->ret_var_set = FALSE;
6851 cfg->inline_depth ++;
6852 prev_real_offset = cfg->real_offset;
6853 prev_cbb_hash = cfg->cbb_hash;
6854 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6855 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6856 prev_cil_start = cfg->cil_start;
6857 prev_cbb = cfg->cbb;
6858 prev_current_method = cfg->current_method;
6859 prev_generic_context = cfg->generic_context;
6860 prev_ret_var_set = cfg->ret_var_set;
6861 prev_disable_inline = cfg->disable_inline;
6863 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Compile the callee's IL into the CFG between sbblock and ebblock. */
6866 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6868 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state. */
6870 cfg->inlined_method = prev_inlined_method;
6871 cfg->real_offset = prev_real_offset;
6872 cfg->cbb_hash = prev_cbb_hash;
6873 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6874 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6875 cfg->cil_start = prev_cil_start;
6876 cfg->locals = prev_locals;
6877 cfg->args = prev_args;
6878 cfg->arg_types = prev_arg_types;
6879 cfg->current_method = prev_current_method;
6880 cfg->generic_context = prev_generic_context;
6881 cfg->ret_var_set = prev_ret_var_set;
6882 cfg->disable_inline = prev_disable_inline;
6883 cfg->inline_depth --;
/* Commit the inline when the body was cheap enough (or forced). */
6885 if ((costs >= 0 && costs < 60) || inline_always) {
6886 if (cfg->verbose_level > 2)
6887 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6889 cfg->stat_inlined_methods++;
6891 /* always add some code to avoid block split failures */
6892 MONO_INST_NEW (cfg, ins, OP_NOP);
6893 MONO_ADD_INS (prev_cbb, ins);
6895 prev_cbb->next_bb = sbblock;
6896 link_bblock (cfg, prev_cbb, sbblock);
6899 * Get rid of the begin and end bblocks if possible to aid local
6902 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6904 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6905 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6907 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6908 MonoBasicBlock *prev = ebblock->in_bb [0];
6909 mono_merge_basic_blocks (cfg, prev, ebblock);
6911 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6912 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6913 cfg->cbb = prev_cbb;
6917 * It's possible that the rvar is set in some prev bblock, but not in others.
6923 for (i = 0; i < ebblock->in_count; ++i) {
6924 bb = ebblock->in_bb [i];
6926 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6929 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6938 *out_cbb = cfg->cbb;
6942 * If the inlined method contains only a throw, then the ret var is not
6943 * set, so set it to a dummy value.
6946 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6948 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6951 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: discard the new bblocks and reset any pending exception. */
6954 if (cfg->verbose_level > 2)
6955 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6956 cfg->exception_type = MONO_EXCEPTION_NONE;
6957 mono_loader_clear_error ();
6959 /* This gets rid of the newly added bblocks */
6960 cfg->cbb = prev_cbb;
6962 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6967 * Some of these comments may well be out-of-date.
6968 * Design decisions: we do a single pass over the IL code (and we do bblock
6969 * splitting/merging in the few cases when it's required: a back jump to an IL
6970 * address that was not already seen as bblock starting point).
6971 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6972 * Complex operations are decomposed in simpler ones right away. We need to let the
6973 * arch-specific code peek and poke inside this process somehow (except when the
6974 * optimizations can take advantage of the full semantic info of coarse opcodes).
6975 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6976 * MonoInst->opcode initially is the IL opcode or some simplification of that
6977 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6978 * opcode with value bigger than OP_LAST.
6979 * At this point the IR can be handed over to an interpreter, a dumb code generator
6980 * or to the optimizing code generator that will translate it to SSA form.
6982 * Profiling directed optimizations.
6983 * We may compile by default with few or no optimizations and instrument the code
6984 * or the user may indicate what methods to optimize the most either in a config file
6985 * or through repeated runs where the compiler applies offline the optimizations to
6986 * each method and then decides if it was worth it.
6989 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6990 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6991 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6992 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6993 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6994 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6995 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6996 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6998 /* offset from br.s -> br like opcodes */
6999 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Return TRUE when the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that offset in the cil_offset_to_bb map.
 */
7002 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7004 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7006 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Scan the IL stream [START, END) once, decoding each opcode's operand kind,
 * and create bblocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch. Blocks ending in CEE_THROW are
 * flagged out_of_line so they are laid out cold.
 * NOTE(review): listing is elided; some case bodies are not visible here.
 */
7010 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7012 unsigned char *ip = start;
7013 unsigned char *target;
7016 MonoBasicBlock *bblock;
7017 const MonoOpcode *opcode;
7020 cli_addr = ip - start;
7021 i = mono_opcode_value ((const guint8 **)&ip, end);
7024 opcode = &mono_opcodes [i];
7025 switch (opcode->argument) {
7026 case MonoInlineNone:
7029 case MonoInlineString:
7030 case MonoInlineType:
7031 case MonoInlineField:
7032 case MonoInlineMethod:
7035 case MonoShortInlineR:
7042 case MonoShortInlineVar:
7043 case MonoShortInlineI:
7046 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction. */
7047 target = start + cli_addr + 2 + (signed char)ip [1];
7048 GET_BBLOCK (cfg, bblock, target);
7051 GET_BBLOCK (cfg, bblock, ip);
7053 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction. */
7054 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7055 GET_BBLOCK (cfg, bblock, target);
7058 GET_BBLOCK (cfg, bblock, ip);
7060 case MonoInlineSwitch: {
7061 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the n-entry jump table. */
7064 cli_addr += 5 + 4 * n;
7065 target = start + cli_addr;
7066 GET_BBLOCK (cfg, bblock, target);
7068 for (j = 0; j < n; ++j) {
7069 target = start + cli_addr + (gint32)read32 (ip);
7070 GET_BBLOCK (cfg, bblock, target);
7080 g_assert_not_reached ();
7083 if (i == CEE_THROW) {
7084 unsigned char *bb_start = ip - 1;
7086 /* Find the start of the bblock containing the throw */
7088 while ((bb_start >= start) && !bblock) {
7089 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7093 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods the
 * method is looked up in the wrapper data (and inflated when a generic
 * CONTEXT is given); otherwise a normal metadata lookup is done.
 * "Open" (uninstantiated generic) methods are allowed by this variant.
 */
static inline MonoMethod *
7104 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7108 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7109 method = mono_method_get_wrapper_data (m, token);
7112 method = mono_class_inflate_generic_method_checked (method, context, &error);
7113 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7116 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, rejects methods on open constructed types (handling elided here).
 */
static inline MonoMethod *
7123 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7125 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7127 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass in METHOD's context: wrapper data for
 * wrappers (inflated with CONTEXT), metadata lookup otherwise. The class is
 * initialized before being returned.
 */
static inline MonoClass*
7134 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7139 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7140 klass = mono_method_get_wrapper_data (method, token);
7142 klass = mono_class_inflate_generic_class (klass, context);
7144 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7145 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7148 mono_class_init (klass);
/*
 * mini_get_signature:
 * Resolve TOKEN to a MonoMethodSignature: wrapper data (inflated with
 * CONTEXT) for wrappers, otherwise parsed from METHOD's image metadata.
 */
static inline MonoMethodSignature*
7153 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7155 MonoMethodSignature *fsig;
7157 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7160 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7162 fsig = mono_inflate_generic_signature (fsig, context, &error);
7164 g_assert (mono_error_ok (&error));
7167 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * throw_exception:
 * Return (lazily looking up and caching) the managed
 * SecurityManager.ThrowException(Exception) helper method.
 */
throw_exception (void)
7175 static MonoMethod *method = NULL;
7178 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7179 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 * Emit a call to the managed ThrowException helper that raises EX at
 * runtime. EX is embedded as a pointer constant argument.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7188 MonoMethod *thrower = throw_exception ();
7191 EMIT_NEW_PCONST (cfg, args [0], ex);
7192 mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
get_original_method (MonoMethod *method)
7202 if (method->wrapper_type == MONO_WRAPPER_NONE)
7205 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7206 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7209 /* in other cases we need to find the original method */
7210 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the security exception at runtime (at the current IL position).
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7215 MonoBasicBlock *bblock, unsigned char *ip)
7217 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7218 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7220 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the security exception at runtime.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7225 MonoBasicBlock *bblock, unsigned char *ip)
7227 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7228 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7230 emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence following a newarr, and when it
 * matches, return the static initializer blob (data pointer, size, and the
 * field token via OUT parameters). Returns NULL-ish failure paths are
 * elided in this listing.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7241 * newarr[System.Int32]
7243 * ldtoken field valuetype ...
7244 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7246 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7248 guint32 token = read32 (ip + 7);
7249 guint32 field_token = read32 (ip + 2);
7250 guint32 field_index = field_token & 0xffffff;
7252 const char *data_ptr;
7254 MonoMethod *cmethod;
7255 MonoClass *dummy_class;
7256 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7260 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7264 *out_field_token = field_token;
7266 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray qualifies. */
7269 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7271 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7272 case MONO_TYPE_BOOLEAN:
7276 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7277 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7278 case MONO_TYPE_CHAR:
7295 if (size > mono_type_size (field->type, &dummy_align))
7298 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7299 if (!image_is_dynamic (method->klass->image)) {
7300 field_index = read32 (ip + 2) & 0xffffff;
7301 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7302 data_ptr = mono_image_rva_map (method->klass->image, rva);
7303 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7304 /* for aot code we do the lookup on load */
7305 if (aot && data_ptr)
7306 return GUINT_TO_POINTER (rva);
7308 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7310 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on CFG with a message that includes the
 * method's full name and a disassembly of the offending IL at IP.
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7320 char *method_fname = mono_method_full_name (method, TRUE);
7322 MonoMethodHeader *header = mono_method_get_header (method);
7324 if (header->code_size == 0)
7325 method_code = g_strdup ("method body is empty.");
7327 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7328 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7329 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7330 g_free (method_fname);
7331 g_free (method_code);
/* header is freed later with the rest of cfg's headers. */
7332 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 * Attach a concrete managed exception object to CFG; the pointer is GC
 * registered so the object stays alive until the compile finishes.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
7338 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7339 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7340 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 * Emit a store of *SP into local N. When the value on top of the stack is a
 * freshly emitted constant, the store is optimized away by retargeting the
 * constant's destination register to the local's register.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7347 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7348 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7349 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7350 /* Optimize reg-reg moves away */
7352 * Can't optimize other opcodes, since sp[0] might point to
7353 * the last ins of a decomposed opcode.
7355 sp [0]->dreg = (cfg)->locals [n]->dreg;
7357 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 * Recognize "ldloca <n>; initobj <type>" and replace it with a direct
 * zero-initialization of the local, so the address-taken local is avoided.
 * Returns the advanced IP on success (failure paths elided in this listing).
 */
static inline unsigned char *
7366 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7376 local = read16 (ip + 2);
7380 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7381 /* From the INITOBJ case */
7382 token = read32 (ip + 2);
7383 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7384 CHECK_TYPELOAD (klass);
7385 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7386 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 * Walk up the parent chain to check whether CLASS derives from
 * System.Exception.
 */
is_exception_class (MonoClass *class)
7397 if (class == mono_defaults.exception_class)
7399 class = class->parent;
 * is_jit_optimizer_disabled:
 * Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly (jit_optimizer_disabled / _inited),
 * published with a memory barrier so concurrent JIT threads see a
 * fully-written value before the inited flag.
 */
is_jit_optimizer_disabled (MonoMethod *m)
7413 MonoAssembly *ass = m->klass->image->assembly;
7414 MonoCustomAttrInfo* attrs;
7415 static MonoClass *klass;
7417 gboolean val = FALSE;
7420 if (ass->jit_optimizer_disabled_inited)
7421 return ass->jit_optimizer_disabled;
7424 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type: optimizer is never disabled. */
7427 ass->jit_optimizer_disabled = FALSE;
7428 mono_memory_barrier ();
7429 ass->jit_optimizer_disabled_inited = TRUE;
7433 attrs = mono_custom_attrs_from_assembly (ass);
7435 for (i = 0; i < attrs->num_attrs; ++i) {
7436 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7438 MonoMethodSignature *sig;
7440 if (!attr->ctor || attr->ctor->klass != klass)
7442 /* Decode the attribute. See reflection.c */
7443 p = (const char*)attr->data;
7444 g_assert (read16 (p) == 0x0001);
7447 // FIXME: Support named parameters
7448 sig = mono_method_signature (attr->ctor);
7449 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7451 /* Two boolean arguments */
7455 mono_custom_attrs_free (attrs);
7458 ass->jit_optimizer_disabled = val;
7459 mono_memory_barrier ();
7460 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 * Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call: the architecture must support the signature pair,
 * and no argument may point into the current method's stack frame
 * (byref/pointer params, valuetype 'this'), among other restrictions.
 */
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7468 gboolean supported_tail_call;
7471 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7472 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7474 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7477 for (i = 0; i < fsig->param_count; ++i) {
7478 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7479 /* These can point to the current method's stack */
7480 supported_tail_call = FALSE;
7482 if (fsig->hasthis && cmethod->klass->valuetype)
7483 /* this might point to the current method's stack */
7484 supported_tail_call = FALSE;
7485 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7486 supported_tail_call = FALSE;
7487 if (cfg->method->save_lmf)
7488 supported_tail_call = FALSE;
7489 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7490 supported_tail_call = FALSE;
7491 if (call_opcode != CEE_CALL)
7492 supported_tail_call = FALSE;
7494 /* Debugging support */
7496 if (supported_tail_call) {
7497 if (!mono_debug_count ())
7498 supported_tail_call = FALSE;
7502 return supported_tail_call;
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
/*
 * is_magic_tls_access:
 * Return whether FIELD is corlib's ThreadLocal`1.tlsdata — the field whose
 * ldflda accesses are special-cased as described above.
 */
is_magic_tls_access (MonoClassField *field)
7512 if (strcmp (field->name, "tlsdata"))
7514 if (strcmp (field->parent->name, "ThreadLocal`1"))
7516 return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
/* The offset encoding mirrors threads.c: the low 6 bits index the per-thread
 * static_data chunk table; the remaining bits (>> 6, masked) are the byte
 * offset inside that chunk. */
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7528 int static_data_reg, array_reg, dreg;
7529 int offset2_reg, idx_reg;
7530 // inlined access to the tls data (see threads.c)
7531 static_data_reg = alloc_ireg (cfg);
7532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7533 idx_reg = alloc_ireg (cfg);
/* chunk index = offset & 0x3f, scaled by pointer size to index the table */
7534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7536 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7537 array_reg = alloc_ireg (cfg);
7538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7539 offset2_reg = alloc_ireg (cfg);
/* byte offset within the chunk = (offset >> 6) & 0x1ffffff */
7540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7542 dreg = alloc_ireg (cfg);
7543 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 * Compute (and cache in *CACHED_TLS_ADDR) the address of the thread-local
 * slot backing TLS_FIELD. The current MonoInternalThread is obtained via an
 * intrinsic when available, otherwise via an icall.
 */
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7554 MonoInst *load, *addr, *temp, *store, *thread_ins;
7555 MonoClassField *offset_field;
7557 if (*cached_tls_addr) {
/* Address already computed earlier in this method: just reload it. */
7558 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7561 thread_ins = mono_get_thread_intrinsic (cfg);
7562 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7564 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7566 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No intrinsic on this arch: call the runtime to get the current thread. */
7568 MonoMethod *thread_method;
7569 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7570 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7572 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7573 addr->klass = mono_class_from_mono_type (tls_field->type);
7574 addr->type = STACK_MP;
/* Cache the computed address in a temp for later accesses in this method. */
7575 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7576 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7578 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
 * Handle calls made to ctors from NEWOBJ opcodes.
 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 * Emit the constructor invocation for a NEWOBJ: tries, in order, ctor
 * intrinsics, inlining, gsharedvt indirect calls, rgctx-based indirect
 * calls for shared generic code, and finally a plain (or virtual, for
 * MarshalByRef) method call. SP[0] is the freshly allocated object.
 * NOTE(review): listing is elided; some lines are not visible here.
 */
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7591 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7593 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7594 MonoBasicBlock *bblock = *ref_bblock;
/* Shared generic valuetype ctors need an rgctx/vtable argument. */
7596 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7597 mono_method_is_generic_sharable (cmethod, TRUE)) {
7598 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7599 mono_class_vtable (cfg->domain, cmethod->klass);
7600 CHECK_TYPELOAD (cmethod->klass);
7602 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7603 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7606 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7607 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7609 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7611 CHECK_TYPELOAD (cmethod->klass);
7612 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7617 /* Avoid virtual calls to ctors if possible */
7618 if (mono_class_is_marshalbyref (cmethod->klass))
7619 callvirt_this_arg = sp [0];
7621 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7622 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7623 CHECK_CFG_EXCEPTION;
7624 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7625 mono_method_check_inlining (cfg, cmethod) &&
7626 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7629 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7630 cfg->real_offset += 5;
/* costs - 5: the inline replaces the 5-byte call instruction. */
7632 *inline_costs += costs - 5;
7633 *ref_bblock = bblock;
7635 INLINE_FAILURE ("inline failure");
7636 // FIXME-VT: Clean this up
7637 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7638 GSHAREDVT_FAILURE(*ip);
7639 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7641 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
/* gsharedvt: go through the out-trampoline via an indirect call. */
7644 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7645 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7646 } else if (context_used &&
7647 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7648 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7649 MonoInst *cmethod_addr;
7651 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7653 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7654 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7656 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7658 INLINE_FAILURE ("ctor call");
7659 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7660 callvirt_this_arg, NULL, vtable_arg);
7667 * mono_method_to_ir:
7669 * Translate the .NET CIL into the JIT's linear IR.
7672 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7673 MonoInst *return_var, MonoInst **inline_args,
7674 guint inline_offset, gboolean is_virtual_call)
7677 MonoInst *ins, **sp, **stack_start;
7678 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7679 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7680 MonoMethod *cmethod, *method_definition;
7681 MonoInst **arg_array;
7682 MonoMethodHeader *header;
7684 guint32 token, ins_flag;
7686 MonoClass *constrained_class = NULL;
7687 unsigned char *ip, *end, *target, *err_pos;
7688 MonoMethodSignature *sig;
7689 MonoGenericContext *generic_context = NULL;
7690 MonoGenericContainer *generic_container = NULL;
7691 MonoType **param_types;
7692 int i, n, start_new_bblock, dreg;
7693 int num_calls = 0, inline_costs = 0;
7694 int breakpoint_id = 0;
7696 GSList *class_inits = NULL;
7697 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7699 gboolean init_locals, seq_points, skip_dead_blocks;
7700 gboolean sym_seq_points = FALSE;
7701 MonoInst *cached_tls_addr = NULL;
7702 MonoDebugMethodInfo *minfo;
7703 MonoBitSet *seq_point_locs = NULL;
7704 MonoBitSet *seq_point_set_locs = NULL;
7706 cfg->disable_inline = is_jit_optimizer_disabled (method);
7708 /* serialization and xdomain stuff may need access to private fields and methods */
7709 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7710 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7711 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7712 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7713 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7714 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7716 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7717 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7718 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7719 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7720 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7722 image = method->klass->image;
7723 header = mono_method_get_header (method);
7725 MonoLoaderError *error;
7727 if ((error = mono_loader_get_last_error ())) {
7728 mono_cfg_set_exception (cfg, error->exception_type);
7730 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7731 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7733 goto exception_exit;
7735 generic_container = mono_method_get_generic_container (method);
7736 sig = mono_method_signature (method);
7737 num_args = sig->hasthis + sig->param_count;
7738 ip = (unsigned char*)header->code;
7739 cfg->cil_start = ip;
7740 end = ip + header->code_size;
7741 cfg->stat_cil_code_size += header->code_size;
7743 seq_points = cfg->gen_seq_points && cfg->method == method;
7745 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7746 /* We could hit a seq point before attaching to the JIT (#8338) */
7750 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7751 minfo = mono_debug_lookup_method (method);
7753 MonoSymSeqPoint *sps;
7754 int i, n_il_offsets;
7756 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7757 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7758 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7759 sym_seq_points = TRUE;
7760 for (i = 0; i < n_il_offsets; ++i) {
7761 if (sps [i].il_offset < header->code_size)
7762 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7765 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7766 /* Methods without line number info like auto-generated property accessors */
7767 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7768 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7769 sym_seq_points = TRUE;
7774 * Methods without init_locals set could cause asserts in various passes
7775 * (#497220). To work around this, we emit dummy initialization opcodes
7776 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7777 * on some platforms.
7779 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7780 init_locals = header->init_locals;
7784 method_definition = method;
7785 while (method_definition->is_inflated) {
7786 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7787 method_definition = imethod->declaring;
7790 /* SkipVerification is not allowed if core-clr is enabled */
7791 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7793 dont_verify_stloc = TRUE;
7796 if (sig->is_inflated)
7797 generic_context = mono_method_get_context (method);
7798 else if (generic_container)
7799 generic_context = &generic_container->context;
7800 cfg->generic_context = generic_context;
7802 if (!cfg->generic_sharing_context)
7803 g_assert (!sig->has_type_parameters);
7805 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7806 g_assert (method->is_inflated);
7807 g_assert (mono_method_get_context (method)->method_inst);
7809 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7810 g_assert (sig->generic_param_count);
7812 if (cfg->method == method) {
7813 cfg->real_offset = 0;
7815 cfg->real_offset = inline_offset;
7818 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7819 cfg->cil_offset_to_bb_len = header->code_size;
7821 cfg->current_method = method;
7823 if (cfg->verbose_level > 2)
7824 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7826 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7828 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7829 for (n = 0; n < sig->param_count; ++n)
7830 param_types [n + sig->hasthis] = sig->params [n];
7831 cfg->arg_types = param_types;
7833 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7834 if (cfg->method == method) {
7836 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7837 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7840 NEW_BBLOCK (cfg, start_bblock);
7841 cfg->bb_entry = start_bblock;
7842 start_bblock->cil_code = NULL;
7843 start_bblock->cil_length = 0;
7846 NEW_BBLOCK (cfg, end_bblock);
7847 cfg->bb_exit = end_bblock;
7848 end_bblock->cil_code = NULL;
7849 end_bblock->cil_length = 0;
7850 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7851 g_assert (cfg->num_bblocks == 2);
7853 arg_array = cfg->args;
7855 if (header->num_clauses) {
7856 cfg->spvars = g_hash_table_new (NULL, NULL);
7857 cfg->exvars = g_hash_table_new (NULL, NULL);
7859 /* handle exception clauses */
7860 for (i = 0; i < header->num_clauses; ++i) {
7861 MonoBasicBlock *try_bb;
7862 MonoExceptionClause *clause = &header->clauses [i];
7863 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7864 try_bb->real_offset = clause->try_offset;
7865 try_bb->try_start = TRUE;
7866 try_bb->region = ((i + 1) << 8) | clause->flags;
7867 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7868 tblock->real_offset = clause->handler_offset;
7869 tblock->flags |= BB_EXCEPTION_HANDLER;
7872 * Linking the try block with the EH block hinders inlining as we won't be able to
7873 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7875 if (COMPILE_LLVM (cfg))
7876 link_bblock (cfg, try_bb, tblock);
7878 if (*(ip + clause->handler_offset) == CEE_POP)
7879 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7881 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7882 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7883 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7884 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7885 MONO_ADD_INS (tblock, ins);
7887 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7888 /* finally clauses already have a seq point */
7889 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7890 MONO_ADD_INS (tblock, ins);
7893 /* todo: is a fault block unsafe to optimize? */
7894 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7895 tblock->flags |= BB_EXCEPTION_UNSAFE;
7898 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7900 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7902 /* catch and filter blocks get the exception object on the stack */
7903 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7904 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7906 /* mostly like handle_stack_args (), but just sets the input args */
7907 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7908 tblock->in_scount = 1;
7909 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7910 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7914 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7915 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7916 if (!cfg->compile_llvm) {
7917 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7918 ins->dreg = tblock->in_stack [0]->dreg;
7919 MONO_ADD_INS (tblock, ins);
7922 MonoInst *dummy_use;
7925 * Add a dummy use for the exvar so its liveness info will be
7928 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7931 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7932 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7933 tblock->flags |= BB_EXCEPTION_HANDLER;
7934 tblock->real_offset = clause->data.filter_offset;
7935 tblock->in_scount = 1;
7936 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7937 /* The filter block shares the exvar with the handler block */
7938 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7939 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7940 MONO_ADD_INS (tblock, ins);
7944 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7945 clause->data.catch_class &&
7946 cfg->generic_sharing_context &&
7947 mono_class_check_context_used (clause->data.catch_class)) {
7949 * In shared generic code with catch
7950 * clauses containing type variables
7951 * the exception handling code has to
7952 * be able to get to the rgctx.
7953 * Therefore we have to make sure that
7954 * the vtable/mrgctx argument (for
7955 * static or generic methods) or the
7956 * "this" argument (for non-static
7957 * methods) are live.
7959 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7960 mini_method_get_context (method)->method_inst ||
7961 method->klass->valuetype) {
7962 mono_get_vtable_var (cfg);
7964 MonoInst *dummy_use;
7966 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7971 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7972 cfg->cbb = start_bblock;
7973 cfg->args = arg_array;
7974 mono_save_args (cfg, sig, inline_args);
7977 /* FIRST CODE BLOCK */
7978 NEW_BBLOCK (cfg, bblock);
7979 bblock->cil_code = ip;
7983 ADD_BBLOCK (cfg, bblock);
7985 if (cfg->method == method) {
7986 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7987 if (breakpoint_id) {
7988 MONO_INST_NEW (cfg, ins, OP_BREAK);
7989 MONO_ADD_INS (bblock, ins);
7993 /* we use a separate basic block for the initialization code */
7994 NEW_BBLOCK (cfg, init_localsbb);
7995 cfg->bb_init = init_localsbb;
7996 init_localsbb->real_offset = cfg->real_offset;
7997 start_bblock->next_bb = init_localsbb;
7998 init_localsbb->next_bb = bblock;
7999 link_bblock (cfg, start_bblock, init_localsbb);
8000 link_bblock (cfg, init_localsbb, bblock);
8002 cfg->cbb = init_localsbb;
8004 if (cfg->gsharedvt && cfg->method == method) {
8005 MonoGSharedVtMethodInfo *info;
8006 MonoInst *var, *locals_var;
8009 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8010 info->method = cfg->method;
8011 info->count_entries = 16;
8012 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8013 cfg->gsharedvt_info = info;
8015 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8016 /* prevent it from being register allocated */
8017 //var->flags |= MONO_INST_VOLATILE;
8018 cfg->gsharedvt_info_var = var;
8020 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8021 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8023 /* Allocate locals */
8024 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8025 /* prevent it from being register allocated */
8026 //locals_var->flags |= MONO_INST_VOLATILE;
8027 cfg->gsharedvt_locals_var = locals_var;
8029 dreg = alloc_ireg (cfg);
8030 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8032 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8033 ins->dreg = locals_var->dreg;
8035 MONO_ADD_INS (cfg->cbb, ins);
8036 cfg->gsharedvt_locals_var_ins = ins;
8038 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8041 ins->flags |= MONO_INST_INIT;
8045 if (mono_security_core_clr_enabled ()) {
8046 /* check if this is native code, e.g. an icall or a p/invoke */
8047 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8048 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8050 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8051 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8053 /* if this is a native call then it can only be JITted from platform code */
8054 if ((icall || pinvk) && method->klass && method->klass->image) {
8055 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8056 MonoException *ex = icall ? mono_get_exception_security () :
8057 mono_get_exception_method_access ();
8058 emit_throw_exception (cfg, ex);
8065 CHECK_CFG_EXCEPTION;
8067 if (header->code_size == 0)
8070 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8075 if (cfg->method == method)
8076 mono_debug_init_method (cfg, bblock, breakpoint_id);
8078 for (n = 0; n < header->num_locals; ++n) {
8079 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8084 /* We force the vtable variable here for all shared methods
8085 for the possibility that they might show up in a stack
8086 trace where their exact instantiation is needed. */
8087 if (cfg->generic_sharing_context && method == cfg->method) {
8088 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8089 mini_method_get_context (method)->method_inst ||
8090 method->klass->valuetype) {
8091 mono_get_vtable_var (cfg);
8093 /* FIXME: Is there a better way to do this?
8094 We need the variable live for the duration
8095 of the whole method. */
8096 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8100 /* add a check for this != NULL to inlined methods */
8101 if (is_virtual_call) {
8104 NEW_ARGLOAD (cfg, arg_ins, 0);
8105 MONO_ADD_INS (cfg->cbb, arg_ins);
8106 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8109 skip_dead_blocks = !dont_verify;
8110 if (skip_dead_blocks) {
8111 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8116 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8117 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8120 start_new_bblock = 0;
8123 if (cfg->method == method)
8124 cfg->real_offset = ip - header->code;
8126 cfg->real_offset = inline_offset;
8131 if (start_new_bblock) {
8132 bblock->cil_length = ip - bblock->cil_code;
8133 if (start_new_bblock == 2) {
8134 g_assert (ip == tblock->cil_code);
8136 GET_BBLOCK (cfg, tblock, ip);
8138 bblock->next_bb = tblock;
8141 start_new_bblock = 0;
8142 for (i = 0; i < bblock->in_scount; ++i) {
8143 if (cfg->verbose_level > 3)
8144 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8145 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8149 g_slist_free (class_inits);
8152 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8153 link_bblock (cfg, bblock, tblock);
8154 if (sp != stack_start) {
8155 handle_stack_args (cfg, stack_start, sp - stack_start);
8157 CHECK_UNVERIFIABLE (cfg);
8159 bblock->next_bb = tblock;
8162 for (i = 0; i < bblock->in_scount; ++i) {
8163 if (cfg->verbose_level > 3)
8164 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8165 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8168 g_slist_free (class_inits);
8173 if (skip_dead_blocks) {
8174 int ip_offset = ip - header->code;
8176 if (ip_offset == bb->end)
8180 int op_size = mono_opcode_size (ip, end);
8181 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8183 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8185 if (ip_offset + op_size == bb->end) {
8186 MONO_INST_NEW (cfg, ins, OP_NOP);
8187 MONO_ADD_INS (bblock, ins);
8188 start_new_bblock = 1;
8196 * Sequence points are points where the debugger can place a breakpoint.
8197 * Currently, we generate these automatically at points where the IL
8200 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8202 * Make methods interruptable at the beginning, and at the targets of
8203 * backward branches.
8204 * Also, do this at the start of every bblock in methods with clauses too,
8205 * to be able to handle instructions with imprecise control flow like
8207 * Backward branches are handled at the end of method-to-ir ().
8209 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8211 /* Avoid sequence points on empty IL like .volatile */
8212 // FIXME: Enable this
8213 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8214 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8215 if (sp != stack_start)
8216 ins->flags |= MONO_INST_NONEMPTY_STACK;
8217 MONO_ADD_INS (cfg->cbb, ins);
8220 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8223 bblock->real_offset = cfg->real_offset;
8225 if ((cfg->method == method) && cfg->coverage_info) {
8226 guint32 cil_offset = ip - header->code;
8227 cfg->coverage_info->data [cil_offset].cil_code = ip;
8229 /* TODO: Use an increment here */
8230 #if defined(TARGET_X86)
8231 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8232 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8234 MONO_ADD_INS (cfg->cbb, ins);
8236 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8237 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8241 if (cfg->verbose_level > 3)
8242 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8246 if (seq_points && !sym_seq_points && sp != stack_start) {
8248 * The C# compiler uses these nops to notify the JIT that it should
8249 * insert seq points.
8251 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8252 MONO_ADD_INS (cfg->cbb, ins);
8254 if (cfg->keep_cil_nops)
8255 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8257 MONO_INST_NEW (cfg, ins, OP_NOP);
8259 MONO_ADD_INS (bblock, ins);
8262 if (should_insert_brekpoint (cfg->method)) {
8263 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8265 MONO_INST_NEW (cfg, ins, OP_NOP);
8268 MONO_ADD_INS (bblock, ins);
8274 CHECK_STACK_OVF (1);
8275 n = (*ip)-CEE_LDARG_0;
8277 EMIT_NEW_ARGLOAD (cfg, ins, n);
8285 CHECK_STACK_OVF (1);
8286 n = (*ip)-CEE_LDLOC_0;
8288 EMIT_NEW_LOCLOAD (cfg, ins, n);
8297 n = (*ip)-CEE_STLOC_0;
8300 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8302 emit_stloc_ir (cfg, sp, header, n);
8309 CHECK_STACK_OVF (1);
8312 EMIT_NEW_ARGLOAD (cfg, ins, n);
8318 CHECK_STACK_OVF (1);
8321 NEW_ARGLOADA (cfg, ins, n);
8322 MONO_ADD_INS (cfg->cbb, ins);
8332 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8334 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8339 CHECK_STACK_OVF (1);
8342 EMIT_NEW_LOCLOAD (cfg, ins, n);
8346 case CEE_LDLOCA_S: {
8347 unsigned char *tmp_ip;
8349 CHECK_STACK_OVF (1);
8350 CHECK_LOCAL (ip [1]);
8352 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8358 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8367 CHECK_LOCAL (ip [1]);
8368 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8370 emit_stloc_ir (cfg, sp, header, ip [1]);
8375 CHECK_STACK_OVF (1);
8376 EMIT_NEW_PCONST (cfg, ins, NULL);
8377 ins->type = STACK_OBJ;
8382 CHECK_STACK_OVF (1);
8383 EMIT_NEW_ICONST (cfg, ins, -1);
8396 CHECK_STACK_OVF (1);
8397 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8403 CHECK_STACK_OVF (1);
8405 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8411 CHECK_STACK_OVF (1);
8412 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8418 CHECK_STACK_OVF (1);
8419 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8420 ins->type = STACK_I8;
8421 ins->dreg = alloc_dreg (cfg, STACK_I8);
8423 ins->inst_l = (gint64)read64 (ip);
8424 MONO_ADD_INS (bblock, ins);
8430 gboolean use_aotconst = FALSE;
8432 #ifdef TARGET_POWERPC
8433 /* FIXME: Clean this up */
8434 if (cfg->compile_aot)
8435 use_aotconst = TRUE;
8438 /* FIXME: we should really allocate this only late in the compilation process */
8439 f = mono_domain_alloc (cfg->domain, sizeof (float));
8441 CHECK_STACK_OVF (1);
8447 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8449 dreg = alloc_freg (cfg);
8450 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8451 ins->type = cfg->r4_stack_type;
8453 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8454 ins->type = cfg->r4_stack_type;
8455 ins->dreg = alloc_dreg (cfg, STACK_R8);
8457 MONO_ADD_INS (bblock, ins);
8467 gboolean use_aotconst = FALSE;
8469 #ifdef TARGET_POWERPC
8470 /* FIXME: Clean this up */
8471 if (cfg->compile_aot)
8472 use_aotconst = TRUE;
8475 /* FIXME: we should really allocate this only late in the compilation process */
8476 d = mono_domain_alloc (cfg->domain, sizeof (double));
8478 CHECK_STACK_OVF (1);
8484 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8486 dreg = alloc_freg (cfg);
8487 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8488 ins->type = STACK_R8;
8490 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8491 ins->type = STACK_R8;
8492 ins->dreg = alloc_dreg (cfg, STACK_R8);
8494 MONO_ADD_INS (bblock, ins);
8503 MonoInst *temp, *store;
8505 CHECK_STACK_OVF (1);
8509 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8510 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8512 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8515 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8528 if (sp [0]->type == STACK_R8)
8529 /* we need to pop the value from the x86 FP stack */
8530 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8536 INLINE_FAILURE ("jmp");
8537 GSHAREDVT_FAILURE (*ip);
8540 if (stack_start != sp)
8542 token = read32 (ip + 1);
8543 /* FIXME: check the signature matches */
8544 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8546 if (!cmethod || mono_loader_get_last_error ())
8549 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8550 GENERIC_SHARING_FAILURE (CEE_JMP);
8552 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8554 if (ARCH_HAVE_OP_TAIL_CALL) {
8555 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8558 /* Handle tail calls similarly to calls */
8559 n = fsig->param_count + fsig->hasthis;
8563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8564 call->method = cmethod;
8565 call->tail_call = TRUE;
8566 call->signature = mono_method_signature (cmethod);
8567 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8568 call->inst.inst_p0 = cmethod;
8569 for (i = 0; i < n; ++i)
8570 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8572 mono_arch_emit_call (cfg, call);
8573 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8574 MONO_ADD_INS (bblock, (MonoInst*)call);
8576 for (i = 0; i < num_args; ++i)
8577 /* Prevent arguments from being optimized away */
8578 arg_array [i]->flags |= MONO_INST_VOLATILE;
8580 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8581 ins = (MonoInst*)call;
8582 ins->inst_p0 = cmethod;
8583 MONO_ADD_INS (bblock, ins);
8587 start_new_bblock = 1;
8592 MonoMethodSignature *fsig;
8595 token = read32 (ip + 1);
8599 //GSHAREDVT_FAILURE (*ip);
8604 fsig = mini_get_signature (method, token, generic_context);
8606 if (method->dynamic && fsig->pinvoke) {
8610 * This is a call through a function pointer using a pinvoke
8611 * signature. Have to create a wrapper and call that instead.
8612 * FIXME: This is very slow, need to create a wrapper at JIT time
8613 * instead based on the signature.
8615 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8616 EMIT_NEW_PCONST (cfg, args [1], fsig);
8618 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8621 n = fsig->param_count + fsig->hasthis;
8625 //g_assert (!virtual || fsig->hasthis);
8629 inline_costs += 10 * num_calls++;
8632 * Making generic calls out of gsharedvt methods.
8633 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8634 * patching gshared method addresses into a gsharedvt method.
8636 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8638 * We pass the address to the gsharedvt trampoline in the rgctx reg
8640 MonoInst *callee = addr;
8642 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8644 GSHAREDVT_FAILURE (*ip);
8646 addr = emit_get_rgctx_sig (cfg, context_used,
8647 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8648 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8652 /* Prevent inlining of methods with indirect calls */
8653 INLINE_FAILURE ("indirect call");
8655 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8660 * Instead of emitting an indirect call, emit a direct call
8661 * with the contents of the aotconst as the patch info.
8663 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8664 info_type = addr->inst_c1;
8665 info_data = addr->inst_p0;
8667 info_type = addr->inst_right->inst_c1;
8668 info_data = addr->inst_right->inst_left;
8671 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8672 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8677 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8681 /* End of call, INS should contain the result of the call, if any */
8683 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8685 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8688 CHECK_CFG_EXCEPTION;
8692 constrained_class = NULL;
8696 case CEE_CALLVIRT: {
8697 MonoInst *addr = NULL;
8698 MonoMethodSignature *fsig = NULL;
8700 int virtual = *ip == CEE_CALLVIRT;
8701 gboolean pass_imt_from_rgctx = FALSE;
8702 MonoInst *imt_arg = NULL;
8703 MonoInst *keep_this_alive = NULL;
8704 gboolean pass_vtable = FALSE;
8705 gboolean pass_mrgctx = FALSE;
8706 MonoInst *vtable_arg = NULL;
8707 gboolean check_this = FALSE;
8708 gboolean supported_tail_call = FALSE;
8709 gboolean tail_call = FALSE;
8710 gboolean need_seq_point = FALSE;
8711 guint32 call_opcode = *ip;
8712 gboolean emit_widen = TRUE;
8713 gboolean push_res = TRUE;
8714 gboolean skip_ret = FALSE;
8715 gboolean delegate_invoke = FALSE;
8716 gboolean direct_icall = FALSE;
8717 gboolean constrained_partial_call = FALSE;
8718 MonoMethod *cil_method;
8721 token = read32 (ip + 1);
8725 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8726 cil_method = cmethod;
8728 if (constrained_class) {
8729 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8730 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8731 g_assert (!cmethod->klass->valuetype);
8732 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8733 constrained_partial_call = TRUE;
8737 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8738 if (cfg->verbose_level > 2)
8739 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8740 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8741 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8742 cfg->generic_sharing_context)) {
8743 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8747 if (cfg->verbose_level > 2)
8748 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8750 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8752 * This is needed since get_method_constrained can't find
8753 * the method in klass representing a type var.
8754 * The type var is guaranteed to be a reference type in this
8757 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8758 g_assert (!cmethod->klass->valuetype);
8760 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8766 if (!cmethod || mono_loader_get_last_error ())
8768 if (!dont_verify && !cfg->skip_visibility) {
8769 MonoMethod *target_method = cil_method;
8770 if (method->is_inflated) {
8771 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8773 if (!mono_method_can_access_method (method_definition, target_method) &&
8774 !mono_method_can_access_method (method, cil_method))
8775 METHOD_ACCESS_FAILURE (method, cil_method);
8778 if (mono_security_core_clr_enabled ())
8779 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8781 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8782 /* MS.NET seems to silently convert this to a callvirt */
8787 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8788 * converts to a callvirt.
8790 * tests/bug-515884.il is an example of this behavior
8792 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8793 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8794 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8798 if (!cmethod->klass->inited)
8799 if (!mono_class_init (cmethod->klass))
8800 TYPE_LOAD_ERROR (cmethod->klass);
8802 fsig = mono_method_signature (cmethod);
8805 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8806 mini_class_is_system_array (cmethod->klass)) {
8807 array_rank = cmethod->klass->rank;
8808 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8809 direct_icall = TRUE;
8810 } else if (fsig->pinvoke) {
8811 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8812 check_for_pending_exc, cfg->compile_aot);
8813 fsig = mono_method_signature (wrapper);
8814 } else if (constrained_class) {
8816 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8820 mono_save_token_info (cfg, image, token, cil_method);
8822 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8823 need_seq_point = TRUE;
8825 /* Don't support calls made using type arguments for now */
8827 if (cfg->gsharedvt) {
8828 if (mini_is_gsharedvt_signature (cfg, fsig))
8829 GSHAREDVT_FAILURE (*ip);
8833 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8834 g_assert_not_reached ();
8836 n = fsig->param_count + fsig->hasthis;
8838 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8841 if (!cfg->generic_sharing_context)
8842 g_assert (!mono_method_check_context_used (cmethod));
8846 //g_assert (!virtual || fsig->hasthis);
8850 if (constrained_class) {
8851 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8852 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8853 /* The 'Own method' case below */
8854 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8855 /* 'The type parameter is instantiated as a reference type' case below. */
8857 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8858 CHECK_CFG_EXCEPTION;
8865 * We have the `constrained.' prefix opcode.
8867 if (constrained_partial_call) {
8868 gboolean need_box = TRUE;
8871 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8872 * called method is not known at compile time either. The called method could end up being
8873 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8874 * to box the receiver.
8875 * A simple solution would be to box always and make a normal virtual call, but that would
8876 * be bad performance wise.
8878 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8880 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
8887 MonoBasicBlock *is_ref_bb, *end_bb;
8888 MonoInst *nonbox_call;
8891 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8893 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8894 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8896 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8898 NEW_BBLOCK (cfg, is_ref_bb);
8899 NEW_BBLOCK (cfg, end_bb);
8901 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8906 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8911 MONO_START_BB (cfg, is_ref_bb);
8912 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8913 ins->klass = constrained_class;
8914 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8915 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8919 MONO_START_BB (cfg, end_bb);
8922 nonbox_call->dreg = ins->dreg;
8924 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8925 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8926 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8929 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8931 * The type parameter is instantiated as a valuetype,
8932 * but that type doesn't override the method we're
8933 * calling, so we need to box `this'.
8935 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8936 ins->klass = constrained_class;
8937 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8938 CHECK_CFG_EXCEPTION;
8939 } else if (!constrained_class->valuetype) {
8940 int dreg = alloc_ireg_ref (cfg);
8943 * The type parameter is instantiated as a reference
8944 * type. We have a managed pointer on the stack, so
8945 * we need to dereference it here.
8947 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8948 ins->type = STACK_OBJ;
8951 if (cmethod->klass->valuetype) {
8954 /* Interface method */
8957 mono_class_setup_vtable (constrained_class);
8958 CHECK_TYPELOAD (constrained_class);
8959 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8961 TYPE_LOAD_ERROR (constrained_class);
8962 slot = mono_method_get_vtable_slot (cmethod);
8964 TYPE_LOAD_ERROR (cmethod->klass);
8965 cmethod = constrained_class->vtable [ioffset + slot];
8967 if (cmethod->klass == mono_defaults.enum_class) {
8968 /* Enum implements some interfaces, so treat this as the first case */
8969 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8970 ins->klass = constrained_class;
8971 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8972 CHECK_CFG_EXCEPTION;
8977 constrained_class = NULL;
8980 if (check_call_signature (cfg, fsig, sp))
8983 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8984 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8985 delegate_invoke = TRUE;
8988 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8990 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8991 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8999 * If the callee is a shared method, then its static cctor
9000 * might not get called after the call was patched.
9002 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9003 emit_generic_class_init (cfg, cmethod->klass);
9004 CHECK_TYPELOAD (cmethod->klass);
9007 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9009 if (cfg->generic_sharing_context) {
9010 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9012 context_used = mini_method_check_context_used (cfg, cmethod);
9014 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9015 /* Generic method interface
9016 calls are resolved via a
9017 helper function and don't
9019 if (!cmethod_context || !cmethod_context->method_inst)
9020 pass_imt_from_rgctx = TRUE;
9024 * If a shared method calls another
9025 * shared method then the caller must
9026 * have a generic sharing context
9027 * because the magic trampoline
9028 * requires it. FIXME: We shouldn't
9029 * have to force the vtable/mrgctx
9030 * variable here. Instead there
9031 * should be a flag in the cfg to
9032 * request a generic sharing context.
9035 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9036 mono_get_vtable_var (cfg);
9041 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9043 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9045 CHECK_TYPELOAD (cmethod->klass);
9046 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9051 g_assert (!vtable_arg);
9053 if (!cfg->compile_aot) {
9055 * emit_get_rgctx_method () calls mono_class_vtable () so check
9056 * for type load errors before.
9058 mono_class_setup_vtable (cmethod->klass);
9059 CHECK_TYPELOAD (cmethod->klass);
9062 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9064 /* !marshalbyref is needed to properly handle generic methods + remoting */
9065 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9066 MONO_METHOD_IS_FINAL (cmethod)) &&
9067 !mono_class_is_marshalbyref (cmethod->klass)) {
9074 if (pass_imt_from_rgctx) {
9075 g_assert (!pass_vtable);
9077 imt_arg = emit_get_rgctx_method (cfg, context_used,
9078 cmethod, MONO_RGCTX_INFO_METHOD);
9082 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9084 /* Calling virtual generic methods */
9085 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9086 !(MONO_METHOD_IS_FINAL (cmethod) &&
9087 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9088 fsig->generic_param_count &&
9089 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9090 MonoInst *this_temp, *this_arg_temp, *store;
9091 MonoInst *iargs [4];
9092 gboolean use_imt = FALSE;
9094 g_assert (fsig->is_inflated);
9096 /* Prevent inlining of methods that contain indirect calls */
9097 INLINE_FAILURE ("virtual generic call");
9099 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9100 GSHAREDVT_FAILURE (*ip);
9102 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9103 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9108 g_assert (!imt_arg);
9110 g_assert (cmethod->is_inflated);
9111 imt_arg = emit_get_rgctx_method (cfg, context_used,
9112 cmethod, MONO_RGCTX_INFO_METHOD);
9113 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9115 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9116 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9117 MONO_ADD_INS (bblock, store);
9119 /* FIXME: This should be a managed pointer */
9120 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9122 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9123 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9124 cmethod, MONO_RGCTX_INFO_METHOD);
9125 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9126 addr = mono_emit_jit_icall (cfg,
9127 mono_helper_compile_generic_method, iargs);
9129 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9131 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9138 * Implement a workaround for the inherent races involved in locking:
9144 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9145 * try block, the Exit () won't be executed, see:
9146 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9147 * To work around this, we extend such try blocks to include the last x bytes
9148 * of the Monitor.Enter () call.
9150 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9151 MonoBasicBlock *tbb;
9153 GET_BBLOCK (cfg, tbb, ip + 5);
9155 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9156 * from Monitor.Enter like ArgumentNullException.
9158 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9159 /* Mark this bblock as needing to be extended */
9160 tbb->extend_try_block = TRUE;
9164 /* Conversion to a JIT intrinsic */
9165 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9167 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9168 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9175 if ((cfg->opt & MONO_OPT_INLINE) &&
9176 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9177 mono_method_check_inlining (cfg, cmethod)) {
9179 gboolean always = FALSE;
9181 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9182 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9183 /* Prevent inlining of methods that call wrappers */
9184 INLINE_FAILURE ("wrapper call");
9185 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9189 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9191 cfg->real_offset += 5;
9193 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9194 /* *sp is already set by inline_method */
9199 inline_costs += costs;
9205 /* Tail recursion elimination */
9206 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9207 gboolean has_vtargs = FALSE;
9210 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9211 INLINE_FAILURE ("tail call");
9213 /* keep it simple */
9214 for (i = fsig->param_count - 1; i >= 0; i--) {
9215 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9220 for (i = 0; i < n; ++i)
9221 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9222 MONO_INST_NEW (cfg, ins, OP_BR);
9223 MONO_ADD_INS (bblock, ins);
9224 tblock = start_bblock->out_bb [0];
9225 link_bblock (cfg, bblock, tblock);
9226 ins->inst_target_bb = tblock;
9227 start_new_bblock = 1;
9229 /* skip the CEE_RET, too */
9230 if (ip_in_bb (cfg, bblock, ip + 5))
9237 inline_costs += 10 * num_calls++;
9240 * Making generic calls out of gsharedvt methods.
9241 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9242 * patching gshared method addresses into a gsharedvt method.
9244 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9245 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9246 MonoRgctxInfoType info_type;
9249 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9250 //GSHAREDVT_FAILURE (*ip);
9251 // disable for possible remoting calls
9252 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9253 GSHAREDVT_FAILURE (*ip);
9254 if (fsig->generic_param_count) {
9255 /* virtual generic call */
9256 g_assert (!imt_arg);
9257 /* Same as the virtual generic case above */
9258 imt_arg = emit_get_rgctx_method (cfg, context_used,
9259 cmethod, MONO_RGCTX_INFO_METHOD);
9260 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9262 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9263 /* This can happen when we call a fully instantiated iface method */
9264 imt_arg = emit_get_rgctx_method (cfg, context_used,
9265 cmethod, MONO_RGCTX_INFO_METHOD);
9270 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9271 keep_this_alive = sp [0];
9273 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9274 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9276 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9277 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9279 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9283 /* Generic sharing */
9286 * Use this if the callee is gsharedvt sharable too, since
9287 * at runtime we might find an instantiation so the call cannot
9288 * be patched (the 'no_patch' code path in mini-trampolines.c).
9290 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9291 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9292 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9293 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9294 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9295 INLINE_FAILURE ("gshared");
9297 g_assert (cfg->generic_sharing_context && cmethod);
9301 * We are compiling a call to a
9302 * generic method from shared code,
9303 * which means that we have to look up
9304 * the method in the rgctx and do an
9308 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9310 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9311 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9315 /* Direct calls to icalls */
9317 MonoMethod *wrapper;
9320 /* Inline the wrapper */
9321 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9323 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9324 g_assert (costs > 0);
9325 cfg->real_offset += 5;
9327 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9328 /* *sp is already set by inline_method */
9333 inline_costs += costs;
9342 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9343 MonoInst *val = sp [fsig->param_count];
9345 if (val->type == STACK_OBJ) {
9346 MonoInst *iargs [2];
9351 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9354 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9355 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9356 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9357 emit_write_barrier (cfg, addr, val);
9358 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9359 GSHAREDVT_FAILURE (*ip);
9360 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9361 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9363 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9364 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9365 if (!cmethod->klass->element_class->valuetype && !readonly)
9366 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9367 CHECK_TYPELOAD (cmethod->klass);
9370 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9373 g_assert_not_reached ();
9380 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9384 /* Tail prefix / tail call optimization */
9386 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9387 /* FIXME: runtime generic context pointer for jumps? */
9388 /* FIXME: handle this for generic sharing eventually */
9389 if ((ins_flag & MONO_INST_TAILCALL) &&
9390 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9391 supported_tail_call = TRUE;
9393 if (supported_tail_call) {
9396 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9397 INLINE_FAILURE ("tail call");
9399 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9401 if (ARCH_HAVE_OP_TAIL_CALL) {
9402 /* Handle tail calls similarly to normal calls */
9405 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9407 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9408 call->tail_call = TRUE;
9409 call->method = cmethod;
9410 call->signature = mono_method_signature (cmethod);
9413 * We implement tail calls by storing the actual arguments into the
9414 * argument variables, then emitting a CEE_JMP.
9416 for (i = 0; i < n; ++i) {
9417 /* Prevent argument from being register allocated */
9418 arg_array [i]->flags |= MONO_INST_VOLATILE;
9419 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9421 ins = (MonoInst*)call;
9422 ins->inst_p0 = cmethod;
9423 ins->inst_p1 = arg_array [0];
9424 MONO_ADD_INS (bblock, ins);
9425 link_bblock (cfg, bblock, end_bblock);
9426 start_new_bblock = 1;
9428 // FIXME: Eliminate unreachable epilogs
9431 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9432 * only reachable from this call.
9434 GET_BBLOCK (cfg, tblock, ip + 5);
9435 if (tblock == bblock || tblock->in_count == 0)
9444 * Synchronized wrappers.
9445 * Its hard to determine where to replace a method with its synchronized
9446 * wrapper without causing an infinite recursion. The current solution is
9447 * to add the synchronized wrapper in the trampolines, and to
9448 * change the called method to a dummy wrapper, and resolve that wrapper
9449 * to the real method in mono_jit_compile_method ().
9451 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9452 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9453 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9454 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9458 INLINE_FAILURE ("call");
9459 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9460 imt_arg, vtable_arg);
9463 link_bblock (cfg, bblock, end_bblock);
9464 start_new_bblock = 1;
9466 // FIXME: Eliminate unreachable epilogs
9469 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9470 * only reachable from this call.
9472 GET_BBLOCK (cfg, tblock, ip + 5);
9473 if (tblock == bblock || tblock->in_count == 0)
9480 /* End of call, INS should contain the result of the call, if any */
9482 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9485 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9490 if (keep_this_alive) {
9491 MonoInst *dummy_use;
9493 /* See mono_emit_method_call_full () */
9494 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9497 CHECK_CFG_EXCEPTION;
9501 g_assert (*ip == CEE_RET);
9505 constrained_class = NULL;
9507 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9511 if (cfg->method != method) {
9512 /* return from inlined method */
9514 * If in_count == 0, that means the ret is unreachable due to
9515 * being preceeded by a throw. In that case, inline_method () will
9516 * handle setting the return value
9517 * (test case: test_0_inline_throw ()).
9519 if (return_var && cfg->cbb->in_count) {
9520 MonoType *ret_type = mono_method_signature (method)->ret;
9526 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9529 //g_assert (returnvar != -1);
9530 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9531 cfg->ret_var_set = TRUE;
9534 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9536 if (cfg->lmf_var && cfg->cbb->in_count)
9540 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9542 if (seq_points && !sym_seq_points) {
9544 * Place a seq point here too even through the IL stack is not
9545 * empty, so a step over on
9548 * will work correctly.
9550 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9551 MONO_ADD_INS (cfg->cbb, ins);
9554 g_assert (!return_var);
9558 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9561 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9564 if (!cfg->vret_addr) {
9567 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9569 EMIT_NEW_RETLOADA (cfg, ret_addr);
9571 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9572 ins->klass = mono_class_from_mono_type (ret_type);
9575 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9576 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9577 MonoInst *iargs [1];
9581 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9582 mono_arch_emit_setret (cfg, method, conv);
9584 mono_arch_emit_setret (cfg, method, *sp);
9587 mono_arch_emit_setret (cfg, method, *sp);
9592 if (sp != stack_start)
9594 MONO_INST_NEW (cfg, ins, OP_BR);
9596 ins->inst_target_bb = end_bblock;
9597 MONO_ADD_INS (bblock, ins);
9598 link_bblock (cfg, bblock, end_bblock);
9599 start_new_bblock = 1;
9603 MONO_INST_NEW (cfg, ins, OP_BR);
9605 target = ip + 1 + (signed char)(*ip);
9607 GET_BBLOCK (cfg, tblock, target);
9608 link_bblock (cfg, bblock, tblock);
9609 ins->inst_target_bb = tblock;
9610 if (sp != stack_start) {
9611 handle_stack_args (cfg, stack_start, sp - stack_start);
9613 CHECK_UNVERIFIABLE (cfg);
9615 MONO_ADD_INS (bblock, ins);
9616 start_new_bblock = 1;
9617 inline_costs += BRANCH_COST;
9631 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9633 target = ip + 1 + *(signed char*)ip;
9639 inline_costs += BRANCH_COST;
9643 MONO_INST_NEW (cfg, ins, OP_BR);
9646 target = ip + 4 + (gint32)read32(ip);
9648 GET_BBLOCK (cfg, tblock, target);
9649 link_bblock (cfg, bblock, tblock);
9650 ins->inst_target_bb = tblock;
9651 if (sp != stack_start) {
9652 handle_stack_args (cfg, stack_start, sp - stack_start);
9654 CHECK_UNVERIFIABLE (cfg);
9657 MONO_ADD_INS (bblock, ins);
9659 start_new_bblock = 1;
9660 inline_costs += BRANCH_COST;
9667 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9668 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9669 guint32 opsize = is_short ? 1 : 4;
9671 CHECK_OPSIZE (opsize);
9673 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9676 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9681 GET_BBLOCK (cfg, tblock, target);
9682 link_bblock (cfg, bblock, tblock);
9683 GET_BBLOCK (cfg, tblock, ip);
9684 link_bblock (cfg, bblock, tblock);
9686 if (sp != stack_start) {
9687 handle_stack_args (cfg, stack_start, sp - stack_start);
9688 CHECK_UNVERIFIABLE (cfg);
9691 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9692 cmp->sreg1 = sp [0]->dreg;
9693 type_from_op (cfg, cmp, sp [0], NULL);
9696 #if SIZEOF_REGISTER == 4
9697 if (cmp->opcode == OP_LCOMPARE_IMM) {
9698 /* Convert it to OP_LCOMPARE */
9699 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9700 ins->type = STACK_I8;
9701 ins->dreg = alloc_dreg (cfg, STACK_I8);
9703 MONO_ADD_INS (bblock, ins);
9704 cmp->opcode = OP_LCOMPARE;
9705 cmp->sreg2 = ins->dreg;
9708 MONO_ADD_INS (bblock, cmp);
9710 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9711 type_from_op (cfg, ins, sp [0], NULL);
9712 MONO_ADD_INS (bblock, ins);
9713 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9714 GET_BBLOCK (cfg, tblock, target);
9715 ins->inst_true_bb = tblock;
9716 GET_BBLOCK (cfg, tblock, ip);
9717 ins->inst_false_bb = tblock;
9718 start_new_bblock = 2;
9721 inline_costs += BRANCH_COST;
9736 MONO_INST_NEW (cfg, ins, *ip);
9738 target = ip + 4 + (gint32)read32(ip);
9744 inline_costs += BRANCH_COST;
9748 MonoBasicBlock **targets;
9749 MonoBasicBlock *default_bblock;
9750 MonoJumpInfoBBTable *table;
9751 int offset_reg = alloc_preg (cfg);
9752 int target_reg = alloc_preg (cfg);
9753 int table_reg = alloc_preg (cfg);
9754 int sum_reg = alloc_preg (cfg);
9755 gboolean use_op_switch;
9759 n = read32 (ip + 1);
9762 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9766 CHECK_OPSIZE (n * sizeof (guint32));
9767 target = ip + n * sizeof (guint32);
9769 GET_BBLOCK (cfg, default_bblock, target);
9770 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9772 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9773 for (i = 0; i < n; ++i) {
9774 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9775 targets [i] = tblock;
9776 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9780 if (sp != stack_start) {
9782 * Link the current bb with the targets as well, so handle_stack_args
9783 * will set their in_stack correctly.
9785 link_bblock (cfg, bblock, default_bblock);
9786 for (i = 0; i < n; ++i)
9787 link_bblock (cfg, bblock, targets [i]);
9789 handle_stack_args (cfg, stack_start, sp - stack_start);
9791 CHECK_UNVERIFIABLE (cfg);
9794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9795 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9798 for (i = 0; i < n; ++i)
9799 link_bblock (cfg, bblock, targets [i]);
9801 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9802 table->table = targets;
9803 table->table_size = n;
9805 use_op_switch = FALSE;
9807 /* ARM implements SWITCH statements differently */
9808 /* FIXME: Make it use the generic implementation */
9809 if (!cfg->compile_aot)
9810 use_op_switch = TRUE;
9813 if (COMPILE_LLVM (cfg))
9814 use_op_switch = TRUE;
9816 cfg->cbb->has_jump_table = 1;
9818 if (use_op_switch) {
9819 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9820 ins->sreg1 = src1->dreg;
9821 ins->inst_p0 = table;
9822 ins->inst_many_bb = targets;
9823 ins->klass = GUINT_TO_POINTER (n);
9824 MONO_ADD_INS (cfg->cbb, ins);
9826 if (sizeof (gpointer) == 8)
9827 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9831 #if SIZEOF_REGISTER == 8
9832 /* The upper word might not be zero, and we add it to a 64 bit address later */
9833 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9836 if (cfg->compile_aot) {
9837 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9839 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9840 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9841 ins->inst_p0 = table;
9842 ins->dreg = table_reg;
9843 MONO_ADD_INS (cfg->cbb, ins);
9846 /* FIXME: Use load_memindex */
9847 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9848 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9849 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9851 start_new_bblock = 1;
9852 inline_costs += (BRANCH_COST * 2);
9872 dreg = alloc_freg (cfg);
9875 dreg = alloc_lreg (cfg);
9878 dreg = alloc_ireg_ref (cfg);
9881 dreg = alloc_preg (cfg);
9884 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9885 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9886 if (*ip == CEE_LDIND_R4)
9887 ins->type = cfg->r4_stack_type;
9888 ins->flags |= ins_flag;
9889 MONO_ADD_INS (bblock, ins);
9891 if (ins_flag & MONO_INST_VOLATILE) {
9892 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9893 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9909 if (ins_flag & MONO_INST_VOLATILE) {
9910 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9911 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9914 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9915 ins->flags |= ins_flag;
9918 MONO_ADD_INS (bblock, ins);
9920 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9921 emit_write_barrier (cfg, sp [0], sp [1]);
9930 MONO_INST_NEW (cfg, ins, (*ip));
9932 ins->sreg1 = sp [0]->dreg;
9933 ins->sreg2 = sp [1]->dreg;
9934 type_from_op (cfg, ins, sp [0], sp [1]);
9936 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9938 /* Use the immediate opcodes if possible */
9939 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9940 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9941 if (imm_opcode != -1) {
9942 ins->opcode = imm_opcode;
9943 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9946 NULLIFY_INS (sp [1]);
9950 MONO_ADD_INS ((cfg)->cbb, (ins));
9952 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9969 MONO_INST_NEW (cfg, ins, (*ip));
9971 ins->sreg1 = sp [0]->dreg;
9972 ins->sreg2 = sp [1]->dreg;
9973 type_from_op (cfg, ins, sp [0], sp [1]);
9975 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9976 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9978 /* FIXME: Pass opcode to is_inst_imm */
9980 /* Use the immediate opcodes if possible */
9981 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9984 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9985 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9986 /* Keep emulated opcodes which are optimized away later */
9987 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9988 imm_opcode = mono_op_to_op_imm (ins->opcode);
9991 if (imm_opcode != -1) {
9992 ins->opcode = imm_opcode;
9993 if (sp [1]->opcode == OP_I8CONST) {
9994 #if SIZEOF_REGISTER == 8
9995 ins->inst_imm = sp [1]->inst_l;
9997 ins->inst_ls_word = sp [1]->inst_ls_word;
9998 ins->inst_ms_word = sp [1]->inst_ms_word;
10002 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10005 /* Might be followed by an instruction added by add_widen_op */
10006 if (sp [1]->next == NULL)
10007 NULLIFY_INS (sp [1]);
10010 MONO_ADD_INS ((cfg)->cbb, (ins));
10012 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10025 case CEE_CONV_OVF_I8:
10026 case CEE_CONV_OVF_U8:
10027 case CEE_CONV_R_UN:
10030 /* Special case this earlier so we have long constants in the IR */
10031 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10032 int data = sp [-1]->inst_c0;
10033 sp [-1]->opcode = OP_I8CONST;
10034 sp [-1]->type = STACK_I8;
10035 #if SIZEOF_REGISTER == 8
10036 if ((*ip) == CEE_CONV_U8)
10037 sp [-1]->inst_c0 = (guint32)data;
10039 sp [-1]->inst_c0 = data;
10041 sp [-1]->inst_ls_word = data;
10042 if ((*ip) == CEE_CONV_U8)
10043 sp [-1]->inst_ms_word = 0;
10045 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10047 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10054 case CEE_CONV_OVF_I4:
10055 case CEE_CONV_OVF_I1:
10056 case CEE_CONV_OVF_I2:
10057 case CEE_CONV_OVF_I:
10058 case CEE_CONV_OVF_U:
10061 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10062 ADD_UNOP (CEE_CONV_OVF_I8);
10069 case CEE_CONV_OVF_U1:
10070 case CEE_CONV_OVF_U2:
10071 case CEE_CONV_OVF_U4:
10074 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10075 ADD_UNOP (CEE_CONV_OVF_U8);
10082 case CEE_CONV_OVF_I1_UN:
10083 case CEE_CONV_OVF_I2_UN:
10084 case CEE_CONV_OVF_I4_UN:
10085 case CEE_CONV_OVF_I8_UN:
10086 case CEE_CONV_OVF_U1_UN:
10087 case CEE_CONV_OVF_U2_UN:
10088 case CEE_CONV_OVF_U4_UN:
10089 case CEE_CONV_OVF_U8_UN:
10090 case CEE_CONV_OVF_I_UN:
10091 case CEE_CONV_OVF_U_UN:
10098 CHECK_CFG_EXCEPTION;
10102 case CEE_ADD_OVF_UN:
10104 case CEE_MUL_OVF_UN:
10106 case CEE_SUB_OVF_UN:
10112 GSHAREDVT_FAILURE (*ip);
10115 token = read32 (ip + 1);
10116 klass = mini_get_class (method, token, generic_context);
10117 CHECK_TYPELOAD (klass);
10119 if (generic_class_is_reference_type (cfg, klass)) {
10120 MonoInst *store, *load;
10121 int dreg = alloc_ireg_ref (cfg);
10123 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10124 load->flags |= ins_flag;
10125 MONO_ADD_INS (cfg->cbb, load);
10127 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10128 store->flags |= ins_flag;
10129 MONO_ADD_INS (cfg->cbb, store);
10131 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10132 emit_write_barrier (cfg, sp [0], sp [1]);
10134 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10140 int loc_index = -1;
10146 token = read32 (ip + 1);
10147 klass = mini_get_class (method, token, generic_context);
10148 CHECK_TYPELOAD (klass);
10150 /* Optimize the common ldobj+stloc combination */
10153 loc_index = ip [6];
10160 loc_index = ip [5] - CEE_STLOC_0;
10167 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10168 CHECK_LOCAL (loc_index);
10170 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10171 ins->dreg = cfg->locals [loc_index]->dreg;
10172 ins->flags |= ins_flag;
10175 if (ins_flag & MONO_INST_VOLATILE) {
10176 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10177 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10183 /* Optimize the ldobj+stobj combination */
10184 /* The reference case ends up being a load+store anyway */
10185 /* Skip this if the operation is volatile. */
10186 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10191 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10198 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10199 ins->flags |= ins_flag;
10202 if (ins_flag & MONO_INST_VOLATILE) {
10203 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10204 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10213 CHECK_STACK_OVF (1);
10215 n = read32 (ip + 1);
10217 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10218 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10219 ins->type = STACK_OBJ;
10222 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10223 MonoInst *iargs [1];
10224 char *str = mono_method_get_wrapper_data (method, n);
10226 if (cfg->compile_aot)
10227 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10229 EMIT_NEW_PCONST (cfg, iargs [0], str);
10230 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10232 if (cfg->opt & MONO_OPT_SHARED) {
10233 MonoInst *iargs [3];
10235 if (cfg->compile_aot) {
10236 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10238 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10239 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10240 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10241 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10242 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10244 if (bblock->out_of_line) {
10245 MonoInst *iargs [2];
10247 if (image == mono_defaults.corlib) {
10249 * Avoid relocations in AOT and save some space by using a
10250 * version of helper_ldstr specialized to mscorlib.
10252 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10253 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10255 /* Avoid creating the string object */
10256 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10257 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10258 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10262 if (cfg->compile_aot) {
10263 NEW_LDSTRCONST (cfg, ins, image, n);
10265 MONO_ADD_INS (bblock, ins);
10268 NEW_PCONST (cfg, ins, NULL);
10269 ins->type = STACK_OBJ;
10270 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10272 OUT_OF_MEMORY_FAILURE;
10275 MONO_ADD_INS (bblock, ins);
10284 MonoInst *iargs [2];
10285 MonoMethodSignature *fsig;
10288 MonoInst *vtable_arg = NULL;
10291 token = read32 (ip + 1);
10292 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10293 if (!cmethod || mono_loader_get_last_error ())
10295 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10298 mono_save_token_info (cfg, image, token, cmethod);
10300 if (!mono_class_init (cmethod->klass))
10301 TYPE_LOAD_ERROR (cmethod->klass);
10303 context_used = mini_method_check_context_used (cfg, cmethod);
10305 if (mono_security_core_clr_enabled ())
10306 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10308 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10309 emit_generic_class_init (cfg, cmethod->klass);
10310 CHECK_TYPELOAD (cmethod->klass);
10314 if (cfg->gsharedvt) {
10315 if (mini_is_gsharedvt_variable_signature (sig))
10316 GSHAREDVT_FAILURE (*ip);
10320 n = fsig->param_count;
10324 * Generate smaller code for the common newobj <exception> instruction in
10325 * argument checking code.
10327 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10328 is_exception_class (cmethod->klass) && n <= 2 &&
10329 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10330 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10331 MonoInst *iargs [3];
10335 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10338 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10341 iargs [1] = sp [0];
10342 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10345 iargs [1] = sp [0];
10346 iargs [2] = sp [1];
10347 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10350 g_assert_not_reached ();
10358 /* move the args to allow room for 'this' in the first position */
10364 /* check_call_signature () requires sp[0] to be set */
10365 this_ins.type = STACK_OBJ;
10366 sp [0] = &this_ins;
10367 if (check_call_signature (cfg, fsig, sp))
10372 if (mini_class_is_system_array (cmethod->klass)) {
10373 *sp = emit_get_rgctx_method (cfg, context_used,
10374 cmethod, MONO_RGCTX_INFO_METHOD);
10376 /* Avoid varargs in the common case */
10377 if (fsig->param_count == 1)
10378 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10379 else if (fsig->param_count == 2)
10380 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10381 else if (fsig->param_count == 3)
10382 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10383 else if (fsig->param_count == 4)
10384 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10386 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10387 } else if (cmethod->string_ctor) {
10388 g_assert (!context_used);
10389 g_assert (!vtable_arg);
10390 /* we simply pass a null pointer */
10391 EMIT_NEW_PCONST (cfg, *sp, NULL);
10392 /* now call the string ctor */
10393 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10395 if (cmethod->klass->valuetype) {
10396 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10397 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10398 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10403 * The code generated by mini_emit_virtual_call () expects
10404 * iargs [0] to be a boxed instance, but luckily the vcall
10405 * will be transformed into a normal call there.
10407 } else if (context_used) {
10408 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10411 MonoVTable *vtable = NULL;
10413 if (!cfg->compile_aot)
10414 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10415 CHECK_TYPELOAD (cmethod->klass);
10418 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10419 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10420 * As a workaround, we call class cctors before allocating objects.
10422 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10423 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10424 if (cfg->verbose_level > 2)
10425 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10426 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10429 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10432 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10435 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10437 /* Now call the actual ctor */
10438 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10439 CHECK_CFG_EXCEPTION;
10442 if (alloc == NULL) {
10444 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10445 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10453 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10454 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10457 case CEE_CASTCLASS:
10461 token = read32 (ip + 1);
10462 klass = mini_get_class (method, token, generic_context);
10463 CHECK_TYPELOAD (klass);
10464 if (sp [0]->type != STACK_OBJ)
10467 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10468 CHECK_CFG_EXCEPTION;
10477 token = read32 (ip + 1);
10478 klass = mini_get_class (method, token, generic_context);
10479 CHECK_TYPELOAD (klass);
10480 if (sp [0]->type != STACK_OBJ)
10483 context_used = mini_class_check_context_used (cfg, klass);
10485 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10486 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10487 MonoInst *args [3];
10494 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10497 if (cfg->compile_aot) {
10498 idx = get_castclass_cache_idx (cfg);
10499 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10501 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10504 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10507 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10508 MonoMethod *mono_isinst;
10509 MonoInst *iargs [1];
10512 mono_isinst = mono_marshal_get_isinst (klass);
10513 iargs [0] = sp [0];
10515 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10516 iargs, ip, cfg->real_offset, TRUE, &bblock);
10517 CHECK_CFG_EXCEPTION;
10518 g_assert (costs > 0);
10521 cfg->real_offset += 5;
10525 inline_costs += costs;
10528 ins = handle_isinst (cfg, klass, *sp, context_used);
10529 CHECK_CFG_EXCEPTION;
10536 case CEE_UNBOX_ANY: {
10537 MonoInst *res, *addr;
10542 token = read32 (ip + 1);
10543 klass = mini_get_class (method, token, generic_context);
10544 CHECK_TYPELOAD (klass);
10546 mono_save_token_info (cfg, image, token, klass);
10548 context_used = mini_class_check_context_used (cfg, klass);
10550 if (mini_is_gsharedvt_klass (cfg, klass)) {
10551 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10553 } else if (generic_class_is_reference_type (cfg, klass)) {
10554 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10555 CHECK_CFG_EXCEPTION;
10556 } else if (mono_class_is_nullable (klass)) {
10557 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10559 addr = handle_unbox (cfg, klass, sp, context_used);
10561 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10572 MonoClass *enum_class;
10573 MonoMethod *has_flag;
10579 token = read32 (ip + 1);
10580 klass = mini_get_class (method, token, generic_context);
10581 CHECK_TYPELOAD (klass);
10583 mono_save_token_info (cfg, image, token, klass);
10585 context_used = mini_class_check_context_used (cfg, klass);
10587 if (generic_class_is_reference_type (cfg, klass)) {
10593 if (klass == mono_defaults.void_class)
10595 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10597 /* frequent check in generic code: box (struct), brtrue */
10602 * <push int/long ptr>
10605 * constrained. MyFlags
10606 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10608 * If we find this sequence and the operand types on box and constrained
10609 * are equal, we can emit a specialized instruction sequence instead of
10610 * the very slow HasFlag () call.
10612 if ((cfg->opt & MONO_OPT_INTRINS) &&
10613 /* Cheap checks first. */
10614 ip + 5 + 6 + 5 < end &&
10615 ip [5] == CEE_PREFIX1 &&
10616 ip [6] == CEE_CONSTRAINED_ &&
10617 ip [11] == CEE_CALLVIRT &&
10618 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10619 mono_class_is_enum (klass) &&
10620 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10621 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10622 has_flag->klass == mono_defaults.enum_class &&
10623 !strcmp (has_flag->name, "HasFlag") &&
10624 has_flag->signature->hasthis &&
10625 has_flag->signature->param_count == 1) {
10626 CHECK_TYPELOAD (enum_class);
10628 if (enum_class == klass) {
10629 MonoInst *enum_this, *enum_flag;
10634 enum_this = sp [0];
10635 enum_flag = sp [1];
10637 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10642 // FIXME: LLVM can't handle the inconsistent bb linking
10643 if (!mono_class_is_nullable (klass) &&
10644 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10645 (ip [5] == CEE_BRTRUE ||
10646 ip [5] == CEE_BRTRUE_S ||
10647 ip [5] == CEE_BRFALSE ||
10648 ip [5] == CEE_BRFALSE_S)) {
10649 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10651 MonoBasicBlock *true_bb, *false_bb;
10655 if (cfg->verbose_level > 3) {
10656 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10657 printf ("<box+brtrue opt>\n");
10662 case CEE_BRFALSE_S:
10665 target = ip + 1 + (signed char)(*ip);
10672 target = ip + 4 + (gint)(read32 (ip));
10676 g_assert_not_reached ();
10680 * We need to link both bblocks, since it is needed for handling stack
10681 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10682 * Branching to only one of them would lead to inconsistencies, so
10683 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10685 GET_BBLOCK (cfg, true_bb, target);
10686 GET_BBLOCK (cfg, false_bb, ip);
10688 mono_link_bblock (cfg, cfg->cbb, true_bb);
10689 mono_link_bblock (cfg, cfg->cbb, false_bb);
10691 if (sp != stack_start) {
10692 handle_stack_args (cfg, stack_start, sp - stack_start);
10694 CHECK_UNVERIFIABLE (cfg);
10697 if (COMPILE_LLVM (cfg)) {
10698 dreg = alloc_ireg (cfg);
10699 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10702 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10704 /* The JIT can't eliminate the iconst+compare */
10705 MONO_INST_NEW (cfg, ins, OP_BR);
10706 ins->inst_target_bb = is_true ? true_bb : false_bb;
10707 MONO_ADD_INS (cfg->cbb, ins);
10710 start_new_bblock = 1;
10714 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10716 CHECK_CFG_EXCEPTION;
10725 token = read32 (ip + 1);
10726 klass = mini_get_class (method, token, generic_context);
10727 CHECK_TYPELOAD (klass);
10729 mono_save_token_info (cfg, image, token, klass);
10731 context_used = mini_class_check_context_used (cfg, klass);
10733 if (mono_class_is_nullable (klass)) {
10736 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10737 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10741 ins = handle_unbox (cfg, klass, sp, context_used);
10754 MonoClassField *field;
10755 #ifndef DISABLE_REMOTING
10759 gboolean is_instance;
10761 gpointer addr = NULL;
10762 gboolean is_special_static;
10764 MonoInst *store_val = NULL;
10765 MonoInst *thread_ins;
10768 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10770 if (op == CEE_STFLD) {
10773 store_val = sp [1];
10778 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10780 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10783 if (op == CEE_STSFLD) {
10786 store_val = sp [0];
10791 token = read32 (ip + 1);
10792 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10793 field = mono_method_get_wrapper_data (method, token);
10794 klass = field->parent;
10797 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10800 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10801 FIELD_ACCESS_FAILURE (method, field);
10802 mono_class_init (klass);
10804 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10807 /* if the class is Critical then transparent code cannot access it's fields */
10808 if (!is_instance && mono_security_core_clr_enabled ())
10809 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10811 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10812 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10813 if (mono_security_core_clr_enabled ())
10814 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10818 * LDFLD etc. is usable on static fields as well, so convert those cases to
10821 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10833 g_assert_not_reached ();
10835 is_instance = FALSE;
10838 context_used = mini_class_check_context_used (cfg, klass);
10840 /* INSTANCE CASE */
10842 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10843 if (op == CEE_STFLD) {
10844 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10846 #ifndef DISABLE_REMOTING
10847 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10848 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10849 MonoInst *iargs [5];
10851 GSHAREDVT_FAILURE (op);
10853 iargs [0] = sp [0];
10854 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10855 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10856 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10858 iargs [4] = sp [1];
10860 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10861 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10862 iargs, ip, cfg->real_offset, TRUE, &bblock);
10863 CHECK_CFG_EXCEPTION;
10864 g_assert (costs > 0);
10866 cfg->real_offset += 5;
10868 inline_costs += costs;
10870 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10877 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10879 if (mini_is_gsharedvt_klass (cfg, klass)) {
10880 MonoInst *offset_ins;
10882 context_used = mini_class_check_context_used (cfg, klass);
10884 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10885 dreg = alloc_ireg_mp (cfg);
10886 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10887 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10888 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10890 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10892 if (sp [0]->opcode != OP_LDADDR)
10893 store->flags |= MONO_INST_FAULT;
10895 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10896 /* insert call to write barrier */
10900 dreg = alloc_ireg_mp (cfg);
10901 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10902 emit_write_barrier (cfg, ptr, sp [1]);
10905 store->flags |= ins_flag;
10912 #ifndef DISABLE_REMOTING
10913 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10914 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10915 MonoInst *iargs [4];
10917 GSHAREDVT_FAILURE (op);
10919 iargs [0] = sp [0];
10920 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10921 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10922 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10923 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10924 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10925 iargs, ip, cfg->real_offset, TRUE, &bblock);
10926 CHECK_CFG_EXCEPTION;
10927 g_assert (costs > 0);
10929 cfg->real_offset += 5;
10933 inline_costs += costs;
10935 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10941 if (sp [0]->type == STACK_VTYPE) {
10944 /* Have to compute the address of the variable */
10946 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10948 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10950 g_assert (var->klass == klass);
10952 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10956 if (op == CEE_LDFLDA) {
10957 if (is_magic_tls_access (field)) {
10958 GSHAREDVT_FAILURE (*ip);
10960 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10962 if (sp [0]->type == STACK_OBJ) {
10963 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10964 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10967 dreg = alloc_ireg_mp (cfg);
10969 if (mini_is_gsharedvt_klass (cfg, klass)) {
10970 MonoInst *offset_ins;
10972 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10973 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10975 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10977 ins->klass = mono_class_from_mono_type (field->type);
10978 ins->type = STACK_MP;
10984 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10986 if (mini_is_gsharedvt_klass (cfg, klass)) {
10987 MonoInst *offset_ins;
10989 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10990 dreg = alloc_ireg_mp (cfg);
10991 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10992 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10994 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10996 load->flags |= ins_flag;
10997 if (sp [0]->opcode != OP_LDADDR)
10998 load->flags |= MONO_INST_FAULT;
11010 context_used = mini_class_check_context_used (cfg, klass);
11012 ftype = mono_field_get_type (field);
11014 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11017 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11018 * to be called here.
11020 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11021 mono_class_vtable (cfg->domain, klass);
11022 CHECK_TYPELOAD (klass);
11024 mono_domain_lock (cfg->domain);
11025 if (cfg->domain->special_static_fields)
11026 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11027 mono_domain_unlock (cfg->domain);
11029 is_special_static = mono_class_field_is_special_static (field);
11031 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11032 thread_ins = mono_get_thread_intrinsic (cfg);
11036 /* Generate IR to compute the field address */
11037 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11039 * Fast access to TLS data
11040 * Inline version of get_thread_static_data () in
11044 int idx, static_data_reg, array_reg, dreg;
11046 GSHAREDVT_FAILURE (op);
11048 MONO_ADD_INS (cfg->cbb, thread_ins);
11049 static_data_reg = alloc_ireg (cfg);
11050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11052 if (cfg->compile_aot) {
11053 int offset_reg, offset2_reg, idx_reg;
11055 /* For TLS variables, this will return the TLS offset */
11056 EMIT_NEW_SFLDACONST (cfg, ins, field);
11057 offset_reg = ins->dreg;
11058 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11059 idx_reg = alloc_ireg (cfg);
11060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11062 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11063 array_reg = alloc_ireg (cfg);
11064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11065 offset2_reg = alloc_ireg (cfg);
11066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11068 dreg = alloc_ireg (cfg);
11069 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11071 offset = (gsize)addr & 0x7fffffff;
11072 idx = offset & 0x3f;
11074 array_reg = alloc_ireg (cfg);
11075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11076 dreg = alloc_ireg (cfg);
11077 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11079 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11080 (cfg->compile_aot && is_special_static) ||
11081 (context_used && is_special_static)) {
11082 MonoInst *iargs [2];
11084 g_assert (field->parent);
11085 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11086 if (context_used) {
11087 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11088 field, MONO_RGCTX_INFO_CLASS_FIELD);
11090 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11092 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11093 } else if (context_used) {
11094 MonoInst *static_data;
11097 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11098 method->klass->name_space, method->klass->name, method->name,
11099 depth, field->offset);
11102 if (mono_class_needs_cctor_run (klass, method))
11103 emit_generic_class_init (cfg, klass);
11106 * The pointer we're computing here is
11108 * super_info.static_data + field->offset
11110 static_data = emit_get_rgctx_klass (cfg, context_used,
11111 klass, MONO_RGCTX_INFO_STATIC_DATA);
11113 if (mini_is_gsharedvt_klass (cfg, klass)) {
11114 MonoInst *offset_ins;
11116 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11117 dreg = alloc_ireg_mp (cfg);
11118 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11119 } else if (field->offset == 0) {
11122 int addr_reg = mono_alloc_preg (cfg);
11123 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11125 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11126 MonoInst *iargs [2];
11128 g_assert (field->parent);
11129 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11130 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11131 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11133 MonoVTable *vtable = NULL;
11135 if (!cfg->compile_aot)
11136 vtable = mono_class_vtable (cfg->domain, klass);
11137 CHECK_TYPELOAD (klass);
11140 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11141 if (!(g_slist_find (class_inits, klass))) {
11142 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11143 if (cfg->verbose_level > 2)
11144 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11145 class_inits = g_slist_prepend (class_inits, klass);
11148 if (cfg->run_cctors) {
11150 /* This makes so that inline cannot trigger */
11151 /* .cctors: too many apps depend on them */
11152 /* running with a specific order... */
11154 if (! vtable->initialized)
11155 INLINE_FAILURE ("class init");
11156 ex = mono_runtime_class_init_full (vtable, FALSE);
11158 set_exception_object (cfg, ex);
11159 goto exception_exit;
11163 if (cfg->compile_aot)
11164 EMIT_NEW_SFLDACONST (cfg, ins, field);
11167 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11169 EMIT_NEW_PCONST (cfg, ins, addr);
11172 MonoInst *iargs [1];
11173 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11174 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11178 /* Generate IR to do the actual load/store operation */
11180 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11181 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11182 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11185 if (op == CEE_LDSFLDA) {
11186 ins->klass = mono_class_from_mono_type (ftype);
11187 ins->type = STACK_PTR;
11189 } else if (op == CEE_STSFLD) {
11192 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11193 store->flags |= ins_flag;
11195 gboolean is_const = FALSE;
11196 MonoVTable *vtable = NULL;
11197 gpointer addr = NULL;
11199 if (!context_used) {
11200 vtable = mono_class_vtable (cfg->domain, klass);
11201 CHECK_TYPELOAD (klass);
11203 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11204 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11205 int ro_type = ftype->type;
11207 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11208 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11209 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11212 GSHAREDVT_FAILURE (op);
11214 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11217 case MONO_TYPE_BOOLEAN:
11219 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11223 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11226 case MONO_TYPE_CHAR:
11228 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11232 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11237 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11241 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11246 case MONO_TYPE_PTR:
11247 case MONO_TYPE_FNPTR:
11248 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11249 type_to_eval_stack_type ((cfg), field->type, *sp);
11252 case MONO_TYPE_STRING:
11253 case MONO_TYPE_OBJECT:
11254 case MONO_TYPE_CLASS:
11255 case MONO_TYPE_SZARRAY:
11256 case MONO_TYPE_ARRAY:
11257 if (!mono_gc_is_moving ()) {
11258 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11259 type_to_eval_stack_type ((cfg), field->type, *sp);
11267 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11272 case MONO_TYPE_VALUETYPE:
11282 CHECK_STACK_OVF (1);
11284 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11285 load->flags |= ins_flag;
11291 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11292 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11293 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11304 token = read32 (ip + 1);
11305 klass = mini_get_class (method, token, generic_context);
11306 CHECK_TYPELOAD (klass);
11307 if (ins_flag & MONO_INST_VOLATILE) {
11308 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11309 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11311 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11312 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11313 ins->flags |= ins_flag;
11314 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11315 generic_class_is_reference_type (cfg, klass)) {
11316 /* insert call to write barrier */
11317 emit_write_barrier (cfg, sp [0], sp [1]);
11329 const char *data_ptr;
11331 guint32 field_token;
11337 token = read32 (ip + 1);
11339 klass = mini_get_class (method, token, generic_context);
11340 CHECK_TYPELOAD (klass);
11342 context_used = mini_class_check_context_used (cfg, klass);
11344 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11345 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11346 ins->sreg1 = sp [0]->dreg;
11347 ins->type = STACK_I4;
11348 ins->dreg = alloc_ireg (cfg);
11349 MONO_ADD_INS (cfg->cbb, ins);
11350 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11353 if (context_used) {
11354 MonoInst *args [3];
11355 MonoClass *array_class = mono_array_class_get (klass, 1);
11356 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11358 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11361 args [0] = emit_get_rgctx_klass (cfg, context_used,
11362 array_class, MONO_RGCTX_INFO_VTABLE);
11367 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11369 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11371 if (cfg->opt & MONO_OPT_SHARED) {
11372 /* Decompose now to avoid problems with references to the domainvar */
11373 MonoInst *iargs [3];
11375 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11376 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11377 iargs [2] = sp [0];
11379 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11381 /* Decompose later since it is needed by abcrem */
11382 MonoClass *array_type = mono_array_class_get (klass, 1);
11383 mono_class_vtable (cfg->domain, array_type);
11384 CHECK_TYPELOAD (array_type);
11386 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11387 ins->dreg = alloc_ireg_ref (cfg);
11388 ins->sreg1 = sp [0]->dreg;
11389 ins->inst_newa_class = klass;
11390 ins->type = STACK_OBJ;
11391 ins->klass = array_type;
11392 MONO_ADD_INS (cfg->cbb, ins);
11393 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11394 cfg->cbb->has_array_access = TRUE;
11396 /* Needed so mono_emit_load_get_addr () gets called */
11397 mono_get_got_var (cfg);
11407 * we inline/optimize the initialization sequence if possible.
11408 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11409 * for small sizes open code the memcpy
11410 * ensure the rva field is big enough
11412 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11413 MonoMethod *memcpy_method = get_memcpy_method ();
11414 MonoInst *iargs [3];
11415 int add_reg = alloc_ireg_mp (cfg);
11417 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11418 if (cfg->compile_aot) {
11419 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11421 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11423 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11424 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11433 if (sp [0]->type != STACK_OBJ)
11436 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11437 ins->dreg = alloc_preg (cfg);
11438 ins->sreg1 = sp [0]->dreg;
11439 ins->type = STACK_I4;
11440 /* This flag will be inherited by the decomposition */
11441 ins->flags |= MONO_INST_FAULT;
11442 MONO_ADD_INS (cfg->cbb, ins);
11443 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11444 cfg->cbb->has_array_access = TRUE;
11452 if (sp [0]->type != STACK_OBJ)
11455 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11457 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11458 CHECK_TYPELOAD (klass);
11459 /* we need to make sure that this array is exactly the type it needs
11460 * to be for correctness. the wrappers are lax with their usage
11461 * so we need to ignore them here
11463 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11464 MonoClass *array_class = mono_array_class_get (klass, 1);
11465 mini_emit_check_array_type (cfg, sp [0], array_class);
11466 CHECK_TYPELOAD (array_class);
11470 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11475 case CEE_LDELEM_I1:
11476 case CEE_LDELEM_U1:
11477 case CEE_LDELEM_I2:
11478 case CEE_LDELEM_U2:
11479 case CEE_LDELEM_I4:
11480 case CEE_LDELEM_U4:
11481 case CEE_LDELEM_I8:
11483 case CEE_LDELEM_R4:
11484 case CEE_LDELEM_R8:
11485 case CEE_LDELEM_REF: {
11491 if (*ip == CEE_LDELEM) {
11493 token = read32 (ip + 1);
11494 klass = mini_get_class (method, token, generic_context);
11495 CHECK_TYPELOAD (klass);
11496 mono_class_init (klass);
11499 klass = array_access_to_klass (*ip);
11501 if (sp [0]->type != STACK_OBJ)
11504 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11506 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11507 // FIXME-VT: OP_ICONST optimization
11508 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11509 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11510 ins->opcode = OP_LOADV_MEMBASE;
11511 } else if (sp [1]->opcode == OP_ICONST) {
11512 int array_reg = sp [0]->dreg;
11513 int index_reg = sp [1]->dreg;
11514 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11516 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11517 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11519 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11520 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11523 if (*ip == CEE_LDELEM)
11530 case CEE_STELEM_I1:
11531 case CEE_STELEM_I2:
11532 case CEE_STELEM_I4:
11533 case CEE_STELEM_I8:
11534 case CEE_STELEM_R4:
11535 case CEE_STELEM_R8:
11536 case CEE_STELEM_REF:
11541 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11543 if (*ip == CEE_STELEM) {
11545 token = read32 (ip + 1);
11546 klass = mini_get_class (method, token, generic_context);
11547 CHECK_TYPELOAD (klass);
11548 mono_class_init (klass);
11551 klass = array_access_to_klass (*ip);
11553 if (sp [0]->type != STACK_OBJ)
11556 emit_array_store (cfg, klass, sp, TRUE);
11558 if (*ip == CEE_STELEM)
11565 case CEE_CKFINITE: {
11569 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11570 ins->sreg1 = sp [0]->dreg;
11571 ins->dreg = alloc_freg (cfg);
11572 ins->type = STACK_R8;
11573 MONO_ADD_INS (bblock, ins);
11575 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11580 case CEE_REFANYVAL: {
11581 MonoInst *src_var, *src;
11583 int klass_reg = alloc_preg (cfg);
11584 int dreg = alloc_preg (cfg);
11586 GSHAREDVT_FAILURE (*ip);
11589 MONO_INST_NEW (cfg, ins, *ip);
11592 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11593 CHECK_TYPELOAD (klass);
11595 context_used = mini_class_check_context_used (cfg, klass);
11598 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11600 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11601 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11604 if (context_used) {
11605 MonoInst *klass_ins;
11607 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11608 klass, MONO_RGCTX_INFO_KLASS);
11611 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11612 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11614 mini_emit_class_check (cfg, klass_reg, klass);
11616 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11617 ins->type = STACK_MP;
11622 case CEE_MKREFANY: {
11623 MonoInst *loc, *addr;
11625 GSHAREDVT_FAILURE (*ip);
11628 MONO_INST_NEW (cfg, ins, *ip);
11631 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11632 CHECK_TYPELOAD (klass);
11634 context_used = mini_class_check_context_used (cfg, klass);
11636 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11637 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11639 if (context_used) {
11640 MonoInst *const_ins;
11641 int type_reg = alloc_preg (cfg);
11643 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11647 } else if (cfg->compile_aot) {
11648 int const_reg = alloc_preg (cfg);
11649 int type_reg = alloc_preg (cfg);
11651 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11652 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11656 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11661 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11662 ins->type = STACK_VTYPE;
11663 ins->klass = mono_defaults.typed_reference_class;
11668 case CEE_LDTOKEN: {
11670 MonoClass *handle_class;
11672 CHECK_STACK_OVF (1);
11675 n = read32 (ip + 1);
11677 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11678 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11679 handle = mono_method_get_wrapper_data (method, n);
11680 handle_class = mono_method_get_wrapper_data (method, n + 1);
11681 if (handle_class == mono_defaults.typehandle_class)
11682 handle = &((MonoClass*)handle)->byval_arg;
11685 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11690 mono_class_init (handle_class);
11691 if (cfg->generic_sharing_context) {
11692 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11693 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11694 /* This case handles ldtoken
11695 of an open type, like for
11698 } else if (handle_class == mono_defaults.typehandle_class) {
11699 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11700 } else if (handle_class == mono_defaults.fieldhandle_class)
11701 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11702 else if (handle_class == mono_defaults.methodhandle_class)
11703 context_used = mini_method_check_context_used (cfg, handle);
11705 g_assert_not_reached ();
11708 if ((cfg->opt & MONO_OPT_SHARED) &&
11709 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11710 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11711 MonoInst *addr, *vtvar, *iargs [3];
11712 int method_context_used;
11714 method_context_used = mini_method_check_context_used (cfg, method);
11716 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11718 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11719 EMIT_NEW_ICONST (cfg, iargs [1], n);
11720 if (method_context_used) {
11721 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11722 method, MONO_RGCTX_INFO_METHOD);
11723 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11725 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11726 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11728 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11732 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11734 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11735 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11736 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11737 (cmethod->klass == mono_defaults.systemtype_class) &&
11738 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11739 MonoClass *tclass = mono_class_from_mono_type (handle);
11741 mono_class_init (tclass);
11742 if (context_used) {
11743 ins = emit_get_rgctx_klass (cfg, context_used,
11744 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11745 } else if (cfg->compile_aot) {
11746 if (method->wrapper_type) {
11747 mono_error_init (&error); //got to do it since there are multiple conditionals below
11748 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11749 /* Special case for static synchronized wrappers */
11750 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11752 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11753 /* FIXME: n is not a normal token */
11755 EMIT_NEW_PCONST (cfg, ins, NULL);
11758 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11761 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11763 ins->type = STACK_OBJ;
11764 ins->klass = cmethod->klass;
11767 MonoInst *addr, *vtvar;
11769 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11771 if (context_used) {
11772 if (handle_class == mono_defaults.typehandle_class) {
11773 ins = emit_get_rgctx_klass (cfg, context_used,
11774 mono_class_from_mono_type (handle),
11775 MONO_RGCTX_INFO_TYPE);
11776 } else if (handle_class == mono_defaults.methodhandle_class) {
11777 ins = emit_get_rgctx_method (cfg, context_used,
11778 handle, MONO_RGCTX_INFO_METHOD);
11779 } else if (handle_class == mono_defaults.fieldhandle_class) {
11780 ins = emit_get_rgctx_field (cfg, context_used,
11781 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11783 g_assert_not_reached ();
11785 } else if (cfg->compile_aot) {
11786 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11788 EMIT_NEW_PCONST (cfg, ins, handle);
11790 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11791 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11792 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11802 MONO_INST_NEW (cfg, ins, OP_THROW);
11804 ins->sreg1 = sp [0]->dreg;
11806 bblock->out_of_line = TRUE;
11807 MONO_ADD_INS (bblock, ins);
11808 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11809 MONO_ADD_INS (bblock, ins);
11812 link_bblock (cfg, bblock, end_bblock);
11813 start_new_bblock = 1;
11815 case CEE_ENDFINALLY:
11816 /* mono_save_seq_point_info () depends on this */
11817 if (sp != stack_start)
11818 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11819 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11820 MONO_ADD_INS (bblock, ins);
11822 start_new_bblock = 1;
11825 * Control will leave the method so empty the stack, otherwise
11826 * the next basic block will start with a nonempty stack.
11828 while (sp != stack_start) {
11833 case CEE_LEAVE_S: {
11836 if (*ip == CEE_LEAVE) {
11838 target = ip + 5 + (gint32)read32(ip + 1);
11841 target = ip + 2 + (signed char)(ip [1]);
11844 /* empty the stack */
11845 while (sp != stack_start) {
11850 * If this leave statement is in a catch block, check for a
11851 * pending exception, and rethrow it if necessary.
11852 * We avoid doing this in runtime invoke wrappers, since those are called
11853 * by native code which excepts the wrapper to catch all exceptions.
11855 for (i = 0; i < header->num_clauses; ++i) {
11856 MonoExceptionClause *clause = &header->clauses [i];
11859 * Use <= in the final comparison to handle clauses with multiple
11860 * leave statements, like in bug #78024.
11861 * The ordering of the exception clauses guarantees that we find the
11862 * innermost clause.
11864 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11866 MonoBasicBlock *dont_throw;
11871 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11874 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11876 NEW_BBLOCK (cfg, dont_throw);
11879 * Currently, we always rethrow the abort exception, despite the
11880 * fact that this is not correct. See thread6.cs for an example.
11881 * But propagating the abort exception is more important than
11882 * getting the sematics right.
11884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11885 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11886 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11888 MONO_START_BB (cfg, dont_throw);
11893 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11895 MonoExceptionClause *clause;
11897 for (tmp = handlers; tmp; tmp = tmp->next) {
11898 clause = tmp->data;
11899 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11901 link_bblock (cfg, bblock, tblock);
11902 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11903 ins->inst_target_bb = tblock;
11904 ins->inst_eh_block = clause;
11905 MONO_ADD_INS (bblock, ins);
11906 bblock->has_call_handler = 1;
11907 if (COMPILE_LLVM (cfg)) {
11908 MonoBasicBlock *target_bb;
11911 * Link the finally bblock with the target, since it will
11912 * conceptually branch there.
11913 * FIXME: Have to link the bblock containing the endfinally.
11915 GET_BBLOCK (cfg, target_bb, target);
11916 link_bblock (cfg, tblock, target_bb);
11919 g_list_free (handlers);
11922 MONO_INST_NEW (cfg, ins, OP_BR);
11923 MONO_ADD_INS (bblock, ins);
11924 GET_BBLOCK (cfg, tblock, target);
11925 link_bblock (cfg, bblock, tblock);
11926 ins->inst_target_bb = tblock;
11927 start_new_bblock = 1;
11929 if (*ip == CEE_LEAVE)
11938 * Mono specific opcodes
11940 case MONO_CUSTOM_PREFIX: {
11942 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11946 case CEE_MONO_ICALL: {
11948 MonoJitICallInfo *info;
11950 token = read32 (ip + 2);
11951 func = mono_method_get_wrapper_data (method, token);
11952 info = mono_find_jit_icall_by_addr (func);
11954 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11957 CHECK_STACK (info->sig->param_count);
11958 sp -= info->sig->param_count;
11960 ins = mono_emit_jit_icall (cfg, info->func, sp);
11961 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11965 inline_costs += 10 * num_calls++;
11969 case CEE_MONO_LDPTR_CARD_TABLE: {
11971 gpointer card_mask;
11972 CHECK_STACK_OVF (1);
11974 if (cfg->compile_aot)
11975 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11977 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
11981 inline_costs += 10 * num_calls++;
11984 case CEE_MONO_LDPTR_NURSERY_START: {
11987 CHECK_STACK_OVF (1);
11989 if (cfg->compile_aot)
11990 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11992 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
11996 inline_costs += 10 * num_calls++;
11999 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12000 CHECK_STACK_OVF (1);
12002 if (cfg->compile_aot)
12003 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12005 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12009 inline_costs += 10 * num_calls++;
12012 case CEE_MONO_LDPTR: {
12015 CHECK_STACK_OVF (1);
12017 token = read32 (ip + 2);
12019 ptr = mono_method_get_wrapper_data (method, token);
12020 EMIT_NEW_PCONST (cfg, ins, ptr);
12023 inline_costs += 10 * num_calls++;
12024 /* Can't embed random pointers into AOT code */
12028 case CEE_MONO_JIT_ICALL_ADDR: {
12029 MonoJitICallInfo *callinfo;
12032 CHECK_STACK_OVF (1);
12034 token = read32 (ip + 2);
12036 ptr = mono_method_get_wrapper_data (method, token);
12037 callinfo = mono_find_jit_icall_by_addr (ptr);
12038 g_assert (callinfo);
12039 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12042 inline_costs += 10 * num_calls++;
12045 case CEE_MONO_ICALL_ADDR: {
12046 MonoMethod *cmethod;
12049 CHECK_STACK_OVF (1);
12051 token = read32 (ip + 2);
12053 cmethod = mono_method_get_wrapper_data (method, token);
12055 if (cfg->compile_aot) {
12056 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12058 ptr = mono_lookup_internal_call (cmethod);
12060 EMIT_NEW_PCONST (cfg, ins, ptr);
12066 case CEE_MONO_VTADDR: {
12067 MonoInst *src_var, *src;
12073 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12074 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12079 case CEE_MONO_NEWOBJ: {
12080 MonoInst *iargs [2];
12082 CHECK_STACK_OVF (1);
12084 token = read32 (ip + 2);
12085 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12086 mono_class_init (klass);
12087 NEW_DOMAINCONST (cfg, iargs [0]);
12088 MONO_ADD_INS (cfg->cbb, iargs [0]);
12089 NEW_CLASSCONST (cfg, iargs [1], klass);
12090 MONO_ADD_INS (cfg->cbb, iargs [1]);
12091 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12093 inline_costs += 10 * num_calls++;
12096 case CEE_MONO_OBJADDR:
12099 MONO_INST_NEW (cfg, ins, OP_MOVE);
12100 ins->dreg = alloc_ireg_mp (cfg);
12101 ins->sreg1 = sp [0]->dreg;
12102 ins->type = STACK_MP;
12103 MONO_ADD_INS (cfg->cbb, ins);
12107 case CEE_MONO_LDNATIVEOBJ:
12109 * Similar to LDOBJ, but instead load the unmanaged
12110 * representation of the vtype to the stack.
12115 token = read32 (ip + 2);
12116 klass = mono_method_get_wrapper_data (method, token);
12117 g_assert (klass->valuetype);
12118 mono_class_init (klass);
12121 MonoInst *src, *dest, *temp;
12124 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12125 temp->backend.is_pinvoke = 1;
12126 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12127 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12129 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12130 dest->type = STACK_VTYPE;
12131 dest->klass = klass;
12137 case CEE_MONO_RETOBJ: {
12139 * Same as RET, but return the native representation of a vtype
12142 g_assert (cfg->ret);
12143 g_assert (mono_method_signature (method)->pinvoke);
12148 token = read32 (ip + 2);
12149 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12151 if (!cfg->vret_addr) {
12152 g_assert (cfg->ret_var_is_local);
12154 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12156 EMIT_NEW_RETLOADA (cfg, ins);
12158 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12160 if (sp != stack_start)
12163 MONO_INST_NEW (cfg, ins, OP_BR);
12164 ins->inst_target_bb = end_bblock;
12165 MONO_ADD_INS (bblock, ins);
12166 link_bblock (cfg, bblock, end_bblock);
12167 start_new_bblock = 1;
12171 case CEE_MONO_CISINST:
12172 case CEE_MONO_CCASTCLASS: {
12177 token = read32 (ip + 2);
12178 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12179 if (ip [1] == CEE_MONO_CISINST)
12180 ins = handle_cisinst (cfg, klass, sp [0]);
12182 ins = handle_ccastclass (cfg, klass, sp [0]);
12188 case CEE_MONO_SAVE_LMF:
12189 case CEE_MONO_RESTORE_LMF:
12190 #ifdef MONO_ARCH_HAVE_LMF_OPS
12191 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12192 MONO_ADD_INS (bblock, ins);
12193 cfg->need_lmf_area = TRUE;
12197 case CEE_MONO_CLASSCONST:
12198 CHECK_STACK_OVF (1);
12200 token = read32 (ip + 2);
12201 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12204 inline_costs += 10 * num_calls++;
12206 case CEE_MONO_NOT_TAKEN:
12207 bblock->out_of_line = TRUE;
12210 case CEE_MONO_TLS: {
12213 CHECK_STACK_OVF (1);
12215 key = (gint32)read32 (ip + 2);
12216 g_assert (key < TLS_KEY_NUM);
12218 ins = mono_create_tls_get (cfg, key);
12220 if (cfg->compile_aot) {
12222 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12223 ins->dreg = alloc_preg (cfg);
12224 ins->type = STACK_PTR;
12226 g_assert_not_reached ();
12229 ins->type = STACK_PTR;
12230 MONO_ADD_INS (bblock, ins);
12235 case CEE_MONO_DYN_CALL: {
12236 MonoCallInst *call;
12238 /* It would be easier to call a trampoline, but that would put an
12239 * extra frame on the stack, confusing exception handling. So
12240 * implement it inline using an opcode for now.
12243 if (!cfg->dyn_call_var) {
12244 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12245 /* prevent it from being register allocated */
12246 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12249 /* Has to use a call inst since it local regalloc expects it */
12250 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12251 ins = (MonoInst*)call;
12253 ins->sreg1 = sp [0]->dreg;
12254 ins->sreg2 = sp [1]->dreg;
12255 MONO_ADD_INS (bblock, ins);
12257 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12260 inline_costs += 10 * num_calls++;
12264 case CEE_MONO_MEMORY_BARRIER: {
12266 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12270 case CEE_MONO_JIT_ATTACH: {
12271 MonoInst *args [16], *domain_ins;
12272 MonoInst *ad_ins, *jit_tls_ins;
12273 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12275 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12277 EMIT_NEW_PCONST (cfg, ins, NULL);
12278 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12280 ad_ins = mono_get_domain_intrinsic (cfg);
12281 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12283 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12284 NEW_BBLOCK (cfg, next_bb);
12285 NEW_BBLOCK (cfg, call_bb);
12287 if (cfg->compile_aot) {
12288 /* AOT code is only used in the root domain */
12289 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12291 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12293 MONO_ADD_INS (cfg->cbb, ad_ins);
12294 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12295 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12297 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12298 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12302 MONO_START_BB (cfg, call_bb);
12305 if (cfg->compile_aot) {
12306 /* AOT code is only used in the root domain */
12307 EMIT_NEW_PCONST (cfg, args [0], NULL);
12309 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12311 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12312 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12315 MONO_START_BB (cfg, next_bb);
12321 case CEE_MONO_JIT_DETACH: {
12322 MonoInst *args [16];
12324 /* Restore the original domain */
12325 dreg = alloc_ireg (cfg);
12326 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12327 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12332 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12338 case CEE_PREFIX1: {
12341 case CEE_ARGLIST: {
12342 /* somewhat similar to LDTOKEN */
12343 MonoInst *addr, *vtvar;
12344 CHECK_STACK_OVF (1);
12345 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12347 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12348 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12350 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12351 ins->type = STACK_VTYPE;
12352 ins->klass = mono_defaults.argumenthandle_class;
12362 MonoInst *cmp, *arg1, *arg2;
12370 * The following transforms:
12371 * CEE_CEQ into OP_CEQ
12372 * CEE_CGT into OP_CGT
12373 * CEE_CGT_UN into OP_CGT_UN
12374 * CEE_CLT into OP_CLT
12375 * CEE_CLT_UN into OP_CLT_UN
12377 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12379 MONO_INST_NEW (cfg, ins, cmp->opcode);
12380 cmp->sreg1 = arg1->dreg;
12381 cmp->sreg2 = arg2->dreg;
12382 type_from_op (cfg, cmp, arg1, arg2);
12384 add_widen_op (cfg, cmp, &arg1, &arg2);
12385 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12386 cmp->opcode = OP_LCOMPARE;
12387 else if (arg1->type == STACK_R4)
12388 cmp->opcode = OP_RCOMPARE;
12389 else if (arg1->type == STACK_R8)
12390 cmp->opcode = OP_FCOMPARE;
12392 cmp->opcode = OP_ICOMPARE;
12393 MONO_ADD_INS (bblock, cmp);
12394 ins->type = STACK_I4;
12395 ins->dreg = alloc_dreg (cfg, ins->type);
12396 type_from_op (cfg, ins, arg1, arg2);
12398 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12400 * The backends expect the fceq opcodes to do the
12403 ins->sreg1 = cmp->sreg1;
12404 ins->sreg2 = cmp->sreg2;
12407 MONO_ADD_INS (bblock, ins);
12413 MonoInst *argconst;
12414 MonoMethod *cil_method;
12416 CHECK_STACK_OVF (1);
12418 n = read32 (ip + 2);
12419 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12420 if (!cmethod || mono_loader_get_last_error ())
12422 mono_class_init (cmethod->klass);
12424 mono_save_token_info (cfg, image, n, cmethod);
12426 context_used = mini_method_check_context_used (cfg, cmethod);
12428 cil_method = cmethod;
12429 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12430 METHOD_ACCESS_FAILURE (method, cil_method);
12432 if (mono_security_core_clr_enabled ())
12433 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12436 * Optimize the common case of ldftn+delegate creation
12438 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12439 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12440 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12441 MonoInst *target_ins, *handle_ins;
12442 MonoMethod *invoke;
12443 int invoke_context_used;
12445 invoke = mono_get_delegate_invoke (ctor_method->klass);
12446 if (!invoke || !mono_method_signature (invoke))
12449 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12451 target_ins = sp [-1];
12453 if (mono_security_core_clr_enabled ())
12454 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12456 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12457 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12458 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12460 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12464 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12465 /* FIXME: SGEN support */
12466 if (invoke_context_used == 0) {
12468 if (cfg->verbose_level > 3)
12469 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12470 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12473 CHECK_CFG_EXCEPTION;
12484 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12485 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12489 inline_costs += 10 * num_calls++;
12492 case CEE_LDVIRTFTN: {
12493 MonoInst *args [2];
12497 n = read32 (ip + 2);
12498 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12499 if (!cmethod || mono_loader_get_last_error ())
12501 mono_class_init (cmethod->klass);
12503 context_used = mini_method_check_context_used (cfg, cmethod);
12505 if (mono_security_core_clr_enabled ())
12506 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12509 * Optimize the common case of ldvirtftn+delegate creation
12511 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12512 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12513 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12514 MonoInst *target_ins, *handle_ins;
12515 MonoMethod *invoke;
12516 int invoke_context_used;
12517 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12519 invoke = mono_get_delegate_invoke (ctor_method->klass);
12520 if (!invoke || !mono_method_signature (invoke))
12523 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12525 target_ins = sp [-1];
12527 if (mono_security_core_clr_enabled ())
12528 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12530 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12531 /* FIXME: SGEN support */
12532 if (invoke_context_used == 0) {
12534 if (cfg->verbose_level > 3)
12535 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12536 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12539 CHECK_CFG_EXCEPTION;
12553 args [1] = emit_get_rgctx_method (cfg, context_used,
12554 cmethod, MONO_RGCTX_INFO_METHOD);
12557 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12559 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12562 inline_costs += 10 * num_calls++;
12566 CHECK_STACK_OVF (1);
12568 n = read16 (ip + 2);
12570 EMIT_NEW_ARGLOAD (cfg, ins, n);
12575 CHECK_STACK_OVF (1);
12577 n = read16 (ip + 2);
12579 NEW_ARGLOADA (cfg, ins, n);
12580 MONO_ADD_INS (cfg->cbb, ins);
12588 n = read16 (ip + 2);
12590 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12592 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12596 CHECK_STACK_OVF (1);
12598 n = read16 (ip + 2);
12600 EMIT_NEW_LOCLOAD (cfg, ins, n);
12605 unsigned char *tmp_ip;
12606 CHECK_STACK_OVF (1);
12608 n = read16 (ip + 2);
12611 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12617 EMIT_NEW_LOCLOADA (cfg, ins, n);
12626 n = read16 (ip + 2);
12628 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12630 emit_stloc_ir (cfg, sp, header, n);
12637 if (sp != stack_start)
12639 if (cfg->method != method)
12641 * Inlining this into a loop in a parent could lead to
12642 * stack overflows which is different behavior than the
12643 * non-inlined case, thus disable inlining in this case.
12645 INLINE_FAILURE("localloc");
12647 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12648 ins->dreg = alloc_preg (cfg);
12649 ins->sreg1 = sp [0]->dreg;
12650 ins->type = STACK_PTR;
12651 MONO_ADD_INS (cfg->cbb, ins);
12653 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12655 ins->flags |= MONO_INST_INIT;
12660 case CEE_ENDFILTER: {
12661 MonoExceptionClause *clause, *nearest;
12666 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12668 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12669 ins->sreg1 = (*sp)->dreg;
12670 MONO_ADD_INS (bblock, ins);
12671 start_new_bblock = 1;
12675 for (cc = 0; cc < header->num_clauses; ++cc) {
12676 clause = &header->clauses [cc];
12677 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12678 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12679 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12682 g_assert (nearest);
12683 if ((ip - header->code) != nearest->handler_offset)
12688 case CEE_UNALIGNED_:
12689 ins_flag |= MONO_INST_UNALIGNED;
12690 /* FIXME: record alignment? we can assume 1 for now */
12694 case CEE_VOLATILE_:
12695 ins_flag |= MONO_INST_VOLATILE;
12699 ins_flag |= MONO_INST_TAILCALL;
12700 cfg->flags |= MONO_CFG_HAS_TAIL;
12701 /* Can't inline tail calls at this time */
12702 inline_costs += 100000;
12709 token = read32 (ip + 2);
12710 klass = mini_get_class (method, token, generic_context);
12711 CHECK_TYPELOAD (klass);
12712 if (generic_class_is_reference_type (cfg, klass))
12713 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12715 mini_emit_initobj (cfg, *sp, NULL, klass);
12719 case CEE_CONSTRAINED_:
12721 token = read32 (ip + 2);
12722 constrained_class = mini_get_class (method, token, generic_context);
12723 CHECK_TYPELOAD (constrained_class);
12727 case CEE_INITBLK: {
12728 MonoInst *iargs [3];
12732 /* Skip optimized paths for volatile operations. */
12733 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12734 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12735 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12736 /* emit_memset only works when val == 0 */
12737 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12740 iargs [0] = sp [0];
12741 iargs [1] = sp [1];
12742 iargs [2] = sp [2];
12743 if (ip [1] == CEE_CPBLK) {
12745 * FIXME: It's unclear whether we should be emitting both the acquire
12746 * and release barriers for cpblk. It is technically both a load and
12747 * store operation, so it seems like that's the sensible thing to do.
12749 * FIXME: We emit full barriers on both sides of the operation for
12750 * simplicity. We should have a separate atomic memcpy method instead.
12752 MonoMethod *memcpy_method = get_memcpy_method ();
12754 if (ins_flag & MONO_INST_VOLATILE)
12755 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12757 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12758 call->flags |= ins_flag;
12760 if (ins_flag & MONO_INST_VOLATILE)
12761 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12763 MonoMethod *memset_method = get_memset_method ();
12764 if (ins_flag & MONO_INST_VOLATILE) {
12765 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12766 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12768 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12769 call->flags |= ins_flag;
12780 ins_flag |= MONO_INST_NOTYPECHECK;
12782 ins_flag |= MONO_INST_NORANGECHECK;
12783 /* we ignore the no-nullcheck for now since we
12784 * really do it explicitly only when doing callvirt->call
12788 case CEE_RETHROW: {
12790 int handler_offset = -1;
12792 for (i = 0; i < header->num_clauses; ++i) {
12793 MonoExceptionClause *clause = &header->clauses [i];
12794 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12795 handler_offset = clause->handler_offset;
12800 bblock->flags |= BB_EXCEPTION_UNSAFE;
12802 if (handler_offset == -1)
12805 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12806 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12807 ins->sreg1 = load->dreg;
12808 MONO_ADD_INS (bblock, ins);
12810 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12811 MONO_ADD_INS (bblock, ins);
12814 link_bblock (cfg, bblock, end_bblock);
12815 start_new_bblock = 1;
12823 CHECK_STACK_OVF (1);
12825 token = read32 (ip + 2);
12826 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12827 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12830 val = mono_type_size (type, &ialign);
12832 MonoClass *klass = mini_get_class (method, token, generic_context);
12833 CHECK_TYPELOAD (klass);
12835 val = mono_type_size (&klass->byval_arg, &ialign);
12837 if (mini_is_gsharedvt_klass (cfg, klass))
12838 GSHAREDVT_FAILURE (*ip);
12840 EMIT_NEW_ICONST (cfg, ins, val);
12845 case CEE_REFANYTYPE: {
12846 MonoInst *src_var, *src;
12848 GSHAREDVT_FAILURE (*ip);
12854 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12856 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12857 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12863 case CEE_READONLY_:
12876 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12886 g_warning ("opcode 0x%02x not handled", *ip);
12890 if (start_new_bblock != 1)
12893 bblock->cil_length = ip - bblock->cil_code;
12894 if (bblock->next_bb) {
12895 /* This could already be set because of inlining, #693905 */
12896 MonoBasicBlock *bb = bblock;
12898 while (bb->next_bb)
12900 bb->next_bb = end_bblock;
12902 bblock->next_bb = end_bblock;
12905 if (cfg->method == method && cfg->domainvar) {
12907 MonoInst *get_domain;
12909 cfg->cbb = init_localsbb;
12911 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12912 MONO_ADD_INS (cfg->cbb, get_domain);
12914 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12916 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12917 MONO_ADD_INS (cfg->cbb, store);
12920 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12921 if (cfg->compile_aot)
12922 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12923 mono_get_got_var (cfg);
12926 if (cfg->method == method && cfg->got_var)
12927 mono_emit_load_got_addr (cfg);
12929 if (init_localsbb) {
12930 cfg->cbb = init_localsbb;
12932 for (i = 0; i < header->num_locals; ++i) {
12933 emit_init_local (cfg, i, header->locals [i], init_locals);
12937 if (cfg->init_ref_vars && cfg->method == method) {
12938 /* Emit initialization for ref vars */
12939 // FIXME: Avoid duplication initialization for IL locals.
12940 for (i = 0; i < cfg->num_varinfo; ++i) {
12941 MonoInst *ins = cfg->varinfo [i];
12943 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12944 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12948 if (cfg->lmf_var && cfg->method == method) {
12949 cfg->cbb = init_localsbb;
12950 emit_push_lmf (cfg);
12953 cfg->cbb = init_localsbb;
12954 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12957 MonoBasicBlock *bb;
12960 * Make seq points at backward branch targets interruptable.
12962 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12963 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12964 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12967 /* Add a sequence point for method entry/exit events */
12968 if (seq_points && cfg->gen_sdb_seq_points) {
12969 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12970 MONO_ADD_INS (init_localsbb, ins);
12971 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12972 MONO_ADD_INS (cfg->bb_exit, ins);
12976 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12977 * the code they refer to was dead (#11880).
12979 if (sym_seq_points) {
12980 for (i = 0; i < header->code_size; ++i) {
12981 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12984 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12985 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12992 if (cfg->method == method) {
12993 MonoBasicBlock *bb;
12994 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12995 bb->region = mono_find_block_region (cfg, bb->real_offset);
12997 mono_create_spvar_for_region (cfg, bb->region);
12998 if (cfg->verbose_level > 2)
12999 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13003 if (inline_costs < 0) {
13006 /* Method is too large */
13007 mname = mono_method_full_name (method, TRUE);
13008 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13009 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13013 if ((cfg->verbose_level > 2) && (cfg->method == method))
13014 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13019 g_assert (!mono_error_ok (&cfg->error));
13023 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13027 set_exception_type_from_invalid_il (cfg, method, ip);
13031 g_slist_free (class_inits);
13032 mono_basic_block_free (original_bb);
13033 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13034 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13035 if (cfg->exception_type)
13038 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-to-memory opcode to its immediate-source
 * counterpart of the same access width (..._MEMBASE_REG -> ..._MEMBASE_IMM).
 * Passing any opcode outside the handled set is a caller bug and asserts.
 */
13042 store_membase_reg_to_store_membase_imm (int opcode)
13045 case OP_STORE_MEMBASE_REG:
13046 return OP_STORE_MEMBASE_IMM;
13047 case OP_STOREI1_MEMBASE_REG:
13048 return OP_STOREI1_MEMBASE_IMM;
13049 case OP_STOREI2_MEMBASE_REG:
13050 return OP_STOREI2_MEMBASE_IMM;
13051 case OP_STOREI4_MEMBASE_REG:
13052 return OP_STOREI4_MEMBASE_IMM;
13053 case OP_STOREI8_MEMBASE_REG:
13054 return OP_STOREI8_MEMBASE_IMM;
13056 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the reg+immediate variant of the two-register opcode OPCODE
 * (e.g. an integer add becomes OP_IADD_IMM), including compares, membase
 * stores and the x86/amd64-specific push/compare-from-memory forms.
 * NOTE(review): most case labels and the fallback return for unhandled
 * opcodes are elided from this view — presumably -1; confirm in the full
 * file before relying on it.
 */
13063 mono_op_to_op_imm (int opcode)
13067 return OP_IADD_IMM;
13069 return OP_ISUB_IMM;
13071 return OP_IDIV_IMM;
13073 return OP_IDIV_UN_IMM;
13075 return OP_IREM_IMM;
13077 return OP_IREM_UN_IMM;
13079 return OP_IMUL_IMM;
13081 return OP_IAND_IMM;
13085 return OP_IXOR_IMM;
13087 return OP_ISHL_IMM;
13089 return OP_ISHR_IMM;
13091 return OP_ISHR_UN_IMM;
13094 return OP_LADD_IMM;
13096 return OP_LSUB_IMM;
13098 return OP_LAND_IMM;
13102 return OP_LXOR_IMM;
13104 return OP_LSHL_IMM;
13106 return OP_LSHR_IMM;
13108 return OP_LSHR_UN_IMM;
/* The long-remainder immediate form is only offered when registers are 64-bit. */
13109 #if SIZEOF_REGISTER == 8
13111 return OP_LREM_IMM;
13115 return OP_COMPARE_IMM;
13117 return OP_ICOMPARE_IMM;
13119 return OP_LCOMPARE_IMM;
13121 case OP_STORE_MEMBASE_REG:
13122 return OP_STORE_MEMBASE_IMM;
13123 case OP_STOREI1_MEMBASE_REG:
13124 return OP_STOREI1_MEMBASE_IMM;
13125 case OP_STOREI2_MEMBASE_REG:
13126 return OP_STOREI2_MEMBASE_IMM;
13127 case OP_STOREI4_MEMBASE_REG:
13128 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific opcodes which also have immediate encodings. */
13130 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13132 return OP_X86_PUSH_IMM;
13133 case OP_X86_COMPARE_MEMBASE_REG:
13134 return OP_X86_COMPARE_MEMBASE_IMM;
13136 #if defined(TARGET_AMD64)
13137 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13138 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13140 case OP_VOIDCALL_REG:
13141 return OP_VOIDCALL;
13149 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load-indirect) opcode to the IR
 * OP_LOAD*_MEMBASE opcode with the matching size and signedness.
 * Pointer-sized loads (and CEE_LDIND_REF) map to OP_LOAD_MEMBASE.
 * Unknown opcodes assert.
 */
13156 ldind_to_load_membase (int opcode)
13160 return OP_LOADI1_MEMBASE;
13162 return OP_LOADU1_MEMBASE;
13164 return OP_LOADI2_MEMBASE;
13166 return OP_LOADU2_MEMBASE;
13168 return OP_LOADI4_MEMBASE;
13170 return OP_LOADU4_MEMBASE;
13172 return OP_LOAD_MEMBASE;
13173 case CEE_LDIND_REF:
13174 return OP_LOAD_MEMBASE;
13176 return OP_LOADI8_MEMBASE;
13178 return OP_LOADR4_MEMBASE;
13180 return OP_LOADR8_MEMBASE;
13182 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store-indirect) opcode to the IR
 * OP_STORE*_MEMBASE_REG opcode with the matching width; CEE_STIND_REF
 * uses the pointer-sized OP_STORE_MEMBASE_REG. Unknown opcodes assert.
 */
13189 stind_to_store_membase (int opcode)
13193 return OP_STOREI1_MEMBASE_REG;
13195 return OP_STOREI2_MEMBASE_REG;
13197 return OP_STOREI4_MEMBASE_REG;
13199 case CEE_STIND_REF:
13200 return OP_STORE_MEMBASE_REG;
13202 return OP_STOREI8_MEMBASE_REG;
13204 return OP_STORER4_MEMBASE_REG;
13206 return OP_STORER8_MEMBASE_REG;
13208 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM) on targets which encode one —
 * currently only x86/amd64. NOTE(review): the fallthrough result for
 * other targets/opcodes is elided from this view — presumably -1.
 */
13215 mono_load_membase_to_load_mem (int opcode)
13217 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13218 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13220 case OP_LOAD_MEMBASE:
13221 return OP_LOAD_MEM;
13222 case OP_LOADU1_MEMBASE:
13223 return OP_LOADU1_MEM;
13224 case OP_LOADU2_MEMBASE:
13225 return OP_LOADU2_MEM;
13226 case OP_LOADI4_MEMBASE:
13227 return OP_LOADI4_MEM;
13228 case OP_LOADU4_MEMBASE:
13229 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist with 64-bit registers. */
13230 #if SIZEOF_REGISTER == 8
13231 case OP_LOADI8_MEMBASE:
13232 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result feeds the store STORE_OPCODE,
 * return the x86/amd64 read-modify-write opcode which operates directly on
 * the memory destination (e.g. "add reg,reg; store" -> "add [base+disp]").
 * The store must be register-sized: pointer/4-byte on x86, additionally
 * 8-byte on amd64. NOTE(review): the case labels for each mapping and the
 * not-foldable fallback value are elided from this view.
 */
13241 op_to_op_dest_membase (int store_opcode, int opcode)
13243 #if defined(TARGET_X86)
13244 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13249 return OP_X86_ADD_MEMBASE_REG;
13251 return OP_X86_SUB_MEMBASE_REG;
13253 return OP_X86_AND_MEMBASE_REG;
13255 return OP_X86_OR_MEMBASE_REG;
13257 return OP_X86_XOR_MEMBASE_REG;
13260 return OP_X86_ADD_MEMBASE_IMM;
13263 return OP_X86_SUB_MEMBASE_IMM;
13266 return OP_X86_AND_MEMBASE_IMM;
13269 return OP_X86_OR_MEMBASE_IMM;
13272 return OP_X86_XOR_MEMBASE_IMM;
13278 #if defined(TARGET_AMD64)
13279 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit operations reuse the X86_* opcodes; 64-bit ones get AMD64_* forms below. */
13284 return OP_X86_ADD_MEMBASE_REG;
13286 return OP_X86_SUB_MEMBASE_REG;
13288 return OP_X86_AND_MEMBASE_REG;
13290 return OP_X86_OR_MEMBASE_REG;
13292 return OP_X86_XOR_MEMBASE_REG;
13294 return OP_X86_ADD_MEMBASE_IMM;
13296 return OP_X86_SUB_MEMBASE_IMM;
13298 return OP_X86_AND_MEMBASE_IMM;
13300 return OP_X86_OR_MEMBASE_IMM;
13302 return OP_X86_XOR_MEMBASE_IMM;
13304 return OP_AMD64_ADD_MEMBASE_REG;
13306 return OP_AMD64_SUB_MEMBASE_REG;
13308 return OP_AMD64_AND_MEMBASE_REG;
13310 return OP_AMD64_OR_MEMBASE_REG;
13312 return OP_AMD64_XOR_MEMBASE_REG;
13315 return OP_AMD64_ADD_MEMBASE_IMM;
13318 return OP_AMD64_SUB_MEMBASE_IMM;
13321 return OP_AMD64_AND_MEMBASE_IMM;
13324 return OP_AMD64_OR_MEMBASE_IMM;
13327 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode into an x86 SETcc writing straight to
 * memory, when the following store is a 1-byte register store (SETcc
 * produces a single byte). NOTE(review): the opcodes matched by the two
 * visible branches (presumably the integer ceq/cne results) are elided
 * from this view.
 */
13337 op_to_op_store_membase (int store_opcode, int opcode)
13339 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13342 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13343 return OP_X86_SETEQ_MEMBASE;
13345 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13346 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST source register of
 * OPCODE into a memory-operand form of OPCODE (push/compare from membase)
 * on x86/amd64. The load width must match the operation width; under
 * ILP32 ("x32") OP_LOADI8_MEMBASE is explicitly excluded from the
 * pointer-sized cases. NOTE(review): the not-foldable fallback paths are
 * elided from this view.
 */
13354 op_to_op_src1_membase (int load_opcode, int opcode)
13357 /* FIXME: This has sign extension issues */
13359 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13360 return OP_X86_COMPARE_MEMBASE8_IMM;
13363 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13368 return OP_X86_PUSH_MEMBASE;
13369 case OP_COMPARE_IMM:
13370 case OP_ICOMPARE_IMM:
13371 return OP_X86_COMPARE_MEMBASE_IMM;
13374 return OP_X86_COMPARE_MEMBASE_REG;
13378 #ifdef TARGET_AMD64
13379 /* FIXME: This has sign extension issues */
13381 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13382 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under x32, an 8-byte load is wider than a pointer and cannot be folded into a push. */
13387 #ifdef __mono_ilp32__
13388 if (load_opcode == OP_LOADI8_MEMBASE)
13390 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13392 return OP_X86_PUSH_MEMBASE;
13394 /* FIXME: This only works for 32 bit immediates
13395 case OP_COMPARE_IMM:
13396 case OP_LCOMPARE_IMM:
13397 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13398 return OP_AMD64_COMPARE_MEMBASE_IMM;
13400 case OP_ICOMPARE_IMM:
13401 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13402 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Under x32, OP_LOAD_MEMBASE is 4 bytes wide and must use the 32-bit compare. */
13406 #ifdef __mono_ilp32__
13407 if (load_opcode == OP_LOAD_MEMBASE)
13408 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13409 if (load_opcode == OP_LOADI8_MEMBASE)
13411 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13413 return OP_AMD64_COMPARE_MEMBASE_REG;
13416 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13417 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the SECOND source register of
 * OPCODE into a reg,[membase] ALU/compare form on x86/amd64. As in the
 * src1 variant, the load width must match the operation width, with
 * special-casing for ILP32 ("x32") where OP_LOAD_MEMBASE is 4 bytes.
 * NOTE(review): the not-foldable fallback paths are elided from this view.
 */
13426 op_to_op_src2_membase (int load_opcode, int opcode)
13429 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13435 return OP_X86_COMPARE_REG_MEMBASE;
13437 return OP_X86_ADD_REG_MEMBASE;
13439 return OP_X86_SUB_REG_MEMBASE;
13441 return OP_X86_AND_REG_MEMBASE;
13443 return OP_X86_OR_REG_MEMBASE;
13445 return OP_X86_XOR_REG_MEMBASE;
13449 #ifdef TARGET_AMD64
/* Under x32, OP_LOAD_MEMBASE counts as a 4-byte load and takes the 32-bit forms. */
13450 #ifdef __mono_ilp32__
13451 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13453 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13457 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13459 return OP_X86_ADD_REG_MEMBASE;
13461 return OP_X86_SUB_REG_MEMBASE;
13463 return OP_X86_AND_REG_MEMBASE;
13465 return OP_X86_OR_REG_MEMBASE;
13467 return OP_X86_XOR_REG_MEMBASE;
13469 #ifdef __mono_ilp32__
13470 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13472 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13477 return OP_AMD64_COMPARE_REG_MEMBASE;
13479 return OP_AMD64_ADD_REG_MEMBASE;
13481 return OP_AMD64_SUB_REG_MEMBASE;
13483 return OP_AMD64_AND_REG_MEMBASE;
13485 return OP_AMD64_OR_REG_MEMBASE;
13487 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the reg->imm conversion for
 * opcodes which are emulated in software on this target (long shifts on
 * 32-bit without native support, mul/div/rem under the
 * MONO_ARCH_EMULATE_* macros), since the emulation helpers expect
 * register operands. NOTE(review): the case labels under each #if and
 * their return value (presumably -1) are elided from this view.
 */
13496 mono_op_to_op_imm_noemul (int opcode)
13499 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13505 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13512 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13517 return mono_op_to_op_imm (opcode);
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them, so they survive across basic-block boundaries. Conversely,
 * variables found to be used in only a single bblock are demoted back to
 * plain local vregs (marked MONO_INST_IS_DEAD), and the varinfo/vars tables
 * are compacted afterwards so later passes (liveness) run faster.
 */
13528 mono_handle_global_vregs (MonoCompile *cfg)
13530 gint32 *vreg_to_bb;
13531 MonoBasicBlock *bb;
/*
 * Per-vreg state: 0 = unseen, block_num+1 = seen in exactly that bblock,
 * -1 = seen in more than one bblock.
 * NOTE(review): sizeof (gint32*) looks like it should be sizeof (gint32)
 * (element size, not pointer size) — it merely over-allocates on 64-bit,
 * but confirm and fix upstream. The '+ 1' outside the multiply is also
 * suspicious (one extra byte, not one extra element).
 */
13534 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13536 #ifdef MONO_ARCH_SIMD_INTRINSICS
13537 if (cfg->uses_simd_intrinsics)
13538 mono_simd_simplify_indirection (cfg);
13541 /* Find local vregs used in more than one bb */
13542 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13543 MonoInst *ins = bb->code;
13544 int block_num = bb->block_num;
13546 if (cfg->verbose_level > 2)
13547 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13550 for (; ins; ins = ins->next) {
13551 const char *spec = INS_INFO (ins->opcode);
13552 int regtype = 0, regindex;
13555 if (G_UNLIKELY (cfg->verbose_level > 2))
13556 mono_print_ins (ins);
/* CIL-level opcodes must all have been lowered to machine IR by now. */
13558 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk the instruction's dest + up to three source registers. */
13560 for (regindex = 0; regindex < 4; regindex ++) {
13563 if (regindex == 0) {
13564 regtype = spec [MONO_INST_DEST];
13565 if (regtype == ' ')
13568 } else if (regindex == 1) {
13569 regtype = spec [MONO_INST_SRC1];
13570 if (regtype == ' ')
13573 } else if (regindex == 2) {
13574 regtype = spec [MONO_INST_SRC2];
13575 if (regtype == ' ')
13578 } else if (regindex == 3) {
13579 regtype = spec [MONO_INST_SRC3];
13580 if (regtype == ' ')
13585 #if SIZEOF_REGISTER == 4
13586 /* In the LLVM case, the long opcodes are not decomposed */
13587 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13589 * Since some instructions reference the original long vreg,
13590 * and some reference the two component vregs, it is quite hard
13591 * to determine when it needs to be global. So be conservative.
13593 if (!get_vreg_to_inst (cfg, vreg)) {
13594 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg)
13596 if (cfg->verbose_level > 2)
13597 printf ("LONG VREG R%d made global.\n", vreg);
13601 * Make the component vregs volatile since the optimizations can
13602 * get confused otherwise.
13604 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13605 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13609 g_assert (vreg != -1);
13611 prev_bb = vreg_to_bb [vreg];
13612 if (prev_bb == 0) {
13613 /* 0 is a valid block num */
13614 vreg_to_bb [vreg] = block_num + 1;
13615 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by construction; skip them. */
13616 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13619 if (!get_vreg_to_inst (cfg, vreg)) {
13620 if (G_UNLIKELY (cfg->verbose_level > 2))
13621 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for this regtype. */
13625 if (vreg_is_ref (cfg, vreg))
13626 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13628 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13631 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13634 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13637 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13640 g_assert_not_reached ();
13644 /* Flag as having been used in more than one bb */
13645 vreg_to_bb [vreg] = -1;
13651 /* If a variable is used in only one bblock, convert it into a local vreg */
13652 for (i = 0; i < cfg->num_varinfo; i++) {
13653 MonoInst *var = cfg->varinfo [i];
13654 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13656 switch (var->type) {
13662 #if SIZEOF_REGISTER == 8
13665 #if !defined(TARGET_X86)
13666 /* Enabling this screws up the fp stack on x86 */
13669 if (mono_arch_is_soft_float ())
13672 /* Arguments are implicitly global */
13673 /* Putting R4 vars into registers doesn't work currently */
13674 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13675 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13677 * Make that the variable's liveness interval doesn't contain a call, since
13678 * that would cause the lvreg to be spilled, making the whole optimization
13681 /* This is too slow for JIT compilation */
/*
 * NOTE(review): this AOT-only path dereferences vreg_to_bb [var->dreg]
 * ('->code' below) even though vreg_to_bb is declared gint32* — it looks
 * like it can only compile if this region is actually disabled (#if 0)
 * in the elided lines. Confirm against the full file.
 */
13683 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13685 int def_index, call_index, ins_index;
13686 gboolean spilled = FALSE;
13691 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13692 const char *spec = INS_INFO (ins->opcode);
13694 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13695 def_index = ins_index;
/*
 * NOTE(review): the second disjunct repeats the SRC1/sreg1 test —
 * almost certainly SRC2/sreg2 was intended, so uses through the second
 * source register are never detected here. Worth fixing upstream.
 */
13697 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13698 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13699 if (call_index > def_index) {
13705 if (MONO_IS_CALL (ins))
13706 call_index = ins_index;
13716 if (G_UNLIKELY (cfg->verbose_level > 2))
13717 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13718 var->flags |= MONO_INST_IS_DEAD;
13719 cfg->vreg_to_inst [var->dreg] = NULL;
13726 * Compress the varinfo and vars tables so the liveness computation is faster and
13727 * takes up less space.
13730 for (i = 0; i < cfg->num_varinfo; ++i) {
13731 MonoInst *var = cfg->varinfo [i];
13732 if (pos < i && cfg->locals_start == i)
13733 cfg->locals_start = pos;
13734 if (!(var->flags & MONO_INST_IS_DEAD)) {
13736 cfg->varinfo [pos] = cfg->varinfo [i];
13737 cfg->varinfo [pos]->inst_c0 = pos;
13738 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13739 cfg->vars [pos].idx = pos;
13740 #if SIZEOF_REGISTER == 4
13741 if (cfg->varinfo [pos]->type == STACK_I8) {
13742 /* Modify the two component vars too */
13745 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13746 var1->inst_c0 = pos;
13747 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13748 var1->inst_c0 = pos;
13755 cfg->num_varinfo = pos;
13756 if (cfg->locals_start > cfg->num_varinfo)
13757 cfg->locals_start = cfg->num_varinfo;
13761 * mono_spill_global_vars:
13763 * Generate spill code for variables which are not allocated to registers,
13764 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13765 * code is generated which could be optimized by the local optimization passes.
 * @cfg: the method compilation context whose variables are processed
 * @need_local_opts: out parameter, set whenever an opcode is rewritten into a
 *   form that the local optimization passes could further simplify
 *
 * NOTE(review): the embedded original line numbers in this excerpt are
 * non-contiguous; some statements (braces, else-arms, and declarations such as
 * 'lvreg', 'store_opcode', 'lvregs', 'store' and 'no_lvreg') are not visible
 * in this view.
13768 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13770 MonoBasicBlock *bb;
13772 int orig_next_vreg;
13773 guint32 *vreg_to_lvreg;
13775 guint32 i, lvregs_len;
13776 gboolean dest_has_lvreg = FALSE;
13777 guint32 stacktypes [128];
13778 MonoInst **live_range_start, **live_range_end;
13779 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13780 int *gsharedvt_vreg_to_idx = NULL;
13782 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec, filled in below when membase stores are
 * temporarily rewritten so their stored value can be treated as a source */
13784 memset (spec2, 0, sizeof (spec2));
13786 /* FIXME: Move this function to mini.c */
/* Map ins-spec register-type characters to stack types for alloc_dreg () */
13787 stacktypes ['i'] = STACK_PTR;
13788 stacktypes ['l'] = STACK_I8;
13789 stacktypes ['f'] = STACK_R8;
13790 #ifdef MONO_ARCH_SIMD_INTRINSICS
13791 stacktypes ['x'] = STACK_VTYPE;
13794 #if SIZEOF_REGISTER == 4
13795 /* Create MonoInsts for longs */
/* On 32-bit targets each long variable has two component vregs (dreg + 1 and
 * dreg + 2); give each component its own OP_REGOFFSET MonoInst pointing at
 * the LS/MS word of the variable's stack slot. */
13796 for (i = 0; i < cfg->num_varinfo; i++) {
13797 MonoInst *ins = cfg->varinfo [i];
13799 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13800 switch (ins->type) {
13805 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13808 g_assert (ins->opcode == OP_REGOFFSET);
13810 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13812 tree->opcode = OP_REGOFFSET;
13813 tree->inst_basereg = ins->inst_basereg;
13814 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13816 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13818 tree->opcode = OP_REGOFFSET;
13819 tree->inst_basereg = ins->inst_basereg;
13820 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13830 if (cfg->compute_gc_maps) {
13831 /* registers need liveness info even for !non refs */
13832 for (i = 0; i < cfg->num_varinfo; i++) {
13833 MonoInst *ins = cfg->varinfo [i];
13835 if (ins->opcode == OP_REGVAR)
13836 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: tag variable-sized variables. gsharedvt_vreg_to_idx holds the
 * info-table slot index + 1 for locals, or -1 for by-ref arguments; both
 * encodings are consumed by the OP_LDADDR handling further down. */
13840 if (cfg->gsharedvt) {
13841 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13843 for (i = 0; i < cfg->num_varinfo; ++i) {
13844 MonoInst *ins = cfg->varinfo [i];
13847 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13848 if (i >= cfg->locals_start) {
13850 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13851 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13852 ins->opcode = OP_GSHAREDVT_LOCAL;
13853 ins->inst_imm = idx;
13856 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13857 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13863 /* FIXME: widening and truncation */
13866 * As an optimization, when a variable allocated to the stack is first loaded into
13867 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13868 * the variable again.
13870 orig_next_vreg = cfg->next_vreg;
13871 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13872 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13876 * These arrays contain the first and last instructions accessing a given
13878 * Since we emit bblocks in the same order we process them here, and we
13879 * don't split live ranges, these will precisely describe the live range of
13880 * the variable, i.e. the instruction range where a valid value can be found
13881 * in the variable's location.
13882 * The live range is computed using the liveness info computed by the liveness pass.
13883 * We can't use vmv->range, since that is an abstract live range, and we need
13884 * one which is instruction precise.
13885 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13887 /* FIXME: Only do this if debugging info is requested */
13888 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13889 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13890 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13891 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13893 /* Add spill loads/stores */
13894 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13897 if (cfg->verbose_level > 2)
13898 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13900 /* Clear vreg_to_lvreg array */
/* The lvreg cache is per-bblock: only entries recorded in lvregs [] are
 * reset, avoiding a full memset of the (potentially large) array. */
13901 for (i = 0; i < lvregs_len; i++)
13902 vreg_to_lvreg [lvregs [i]] = 0;
13906 MONO_BB_FOR_EACH_INS (bb, ins) {
13907 const char *spec = INS_INFO (ins->opcode);
13908 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13909 gboolean store, no_lvreg;
13910 int sregs [MONO_MAX_SRC_REGS];
13912 if (G_UNLIKELY (cfg->verbose_level > 2))
13913 mono_print_ins (ins);
13915 if (ins->opcode == OP_NOP)
13919 * We handle LDADDR here as well, since it can only be decomposed
13920 * when variable addresses are known.
13922 if (ins->opcode == OP_LDADDR) {
13923 MonoInst *var = ins->inst_p0;
13925 if (var->opcode == OP_VTARG_ADDR) {
13926 /* Happens on SPARC/S390 where vtypes are passed by reference */
13927 MonoInst *vtaddr = var->inst_left;
13928 if (vtaddr->opcode == OP_REGVAR) {
13929 ins->opcode = OP_MOVE;
13930 ins->sreg1 = vtaddr->dreg;
13932 else if (var->inst_left->opcode == OP_REGOFFSET) {
13933 ins->opcode = OP_LOAD_MEMBASE;
13934 ins->inst_basereg = vtaddr->inst_basereg;
13935 ins->inst_offset = vtaddr->inst_offset;
13938 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13939 /* gsharedvt arg passed by ref */
13940 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13942 ins->opcode = OP_LOAD_MEMBASE;
13943 ins->inst_basereg = var->inst_basereg;
13944 ins->inst_offset = var->inst_offset;
13945 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13946 MonoInst *load, *load2, *load3;
13947 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13948 int reg1, reg2, reg3;
13949 MonoInst *info_var = cfg->gsharedvt_info_var;
13950 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13954 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13957 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13959 g_assert (info_var);
13960 g_assert (locals_var);
13962 /* Mark the instruction used to compute the locals var as used */
13963 cfg->gsharedvt_locals_var_ins = NULL;
13965 /* Load the offset */
13966 if (info_var->opcode == OP_REGOFFSET) {
13967 reg1 = alloc_ireg (cfg);
13968 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13969 } else if (info_var->opcode == OP_REGVAR) {
13971 reg1 = info_var->dreg;
13973 g_assert_not_reached ();
13975 reg2 = alloc_ireg (cfg);
13976 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13977 /* Load the locals area address */
13978 reg3 = alloc_ireg (cfg);
13979 if (locals_var->opcode == OP_REGOFFSET) {
13980 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13981 } else if (locals_var->opcode == OP_REGVAR) {
13982 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13984 g_assert_not_reached ();
13986 /* Compute the address */
13987 ins->opcode = OP_PADD;
13991 mono_bblock_insert_before_ins (bb, ins, load3);
13992 mono_bblock_insert_before_ins (bb, load3, load2);
13994 mono_bblock_insert_before_ins (bb, load2, load);
13996 g_assert (var->opcode == OP_REGOFFSET);
13998 ins->opcode = OP_ADD_IMM;
13999 ins->sreg1 = var->inst_basereg;
14000 ins->inst_imm = var->inst_offset;
14003 *need_local_opts = TRUE;
14004 spec = INS_INFO (ins->opcode);
/* By this point only low-level opcodes should remain; a CIL-level opcode
 * (numerically below MONO_CEE_LAST) indicates a decomposition bug. */
14007 if (ins->opcode < MONO_CEE_LAST) {
14008 mono_print_ins (ins);
14009 g_assert_not_reached ();
14013 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 of membase stores so that the dreg/sreg
 * handling below sees the stored value as a source; the swap is undone
 * further down. spec2 mirrors this rewritten register layout. */
14017 if (MONO_IS_STORE_MEMBASE (ins)) {
14018 tmp_reg = ins->dreg;
14019 ins->dreg = ins->sreg2;
14020 ins->sreg2 = tmp_reg;
14023 spec2 [MONO_INST_DEST] = ' ';
14024 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14025 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14026 spec2 [MONO_INST_SRC3] = ' ';
14028 } else if (MONO_IS_STORE_MEMINDEX (ins))
14029 g_assert_not_reached ();
14034 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14035 printf ("\t %.3s %d", spec, ins->dreg);
14036 num_sregs = mono_inst_get_src_registers (ins, sregs);
14037 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14038 printf (" %d", sregs [srcindex]);
/* --- Destination handling: spill definitions of global vregs to the
 * variable's stack slot, or rename to the allocated hreg --- */
14045 regtype = spec [MONO_INST_DEST];
14046 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14049 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14050 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14051 MonoInst *store_ins;
14053 MonoInst *def_ins = ins;
14054 int dreg = ins->dreg; /* The original vreg */
14056 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14058 if (var->opcode == OP_REGVAR) {
14059 ins->dreg = var->dreg;
14060 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14062 * Instead of emitting a load+store, use a _membase opcode.
14064 g_assert (var->opcode == OP_REGOFFSET);
14065 if (ins->opcode == OP_MOVE) {
14069 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14070 ins->inst_basereg = var->inst_basereg;
14071 ins->inst_offset = var->inst_offset;
14074 spec = INS_INFO (ins->opcode);
14078 g_assert (var->opcode == OP_REGOFFSET);
14080 prev_dreg = ins->dreg;
14082 /* Invalidate any previous lvreg for this vreg */
14083 vreg_to_lvreg [ins->dreg] = 0;
14087 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14089 store_opcode = OP_STOREI8_MEMBASE_REG;
14092 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14094 #if SIZEOF_REGISTER != 8
14095 if (regtype == 'l') {
/* 32-bit long result: store the two word halves separately */
14096 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14097 mono_bblock_insert_after_ins (bb, ins, store_ins);
14098 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14099 mono_bblock_insert_after_ins (bb, ins, store_ins);
14100 def_ins = store_ins;
14105 g_assert (store_opcode != OP_STOREV_MEMBASE);
14107 /* Try to fuse the store into the instruction itself */
14108 /* FIXME: Add more instructions */
14109 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14110 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14111 ins->inst_imm = ins->inst_c0;
14112 ins->inst_destbasereg = var->inst_basereg;
14113 ins->inst_offset = var->inst_offset;
14114 spec = INS_INFO (ins->opcode);
14115 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14116 ins->opcode = store_opcode;
14117 ins->inst_destbasereg = var->inst_basereg;
14118 ins->inst_offset = var->inst_offset;
14122 tmp_reg = ins->dreg;
14123 ins->dreg = ins->sreg2;
14124 ins->sreg2 = tmp_reg;
14127 spec2 [MONO_INST_DEST] = ' ';
14128 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14129 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14130 spec2 [MONO_INST_SRC3] = ' ';
14132 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14133 // FIXME: The backends expect the base reg to be in inst_basereg
14134 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14136 ins->inst_basereg = var->inst_basereg;
14137 ins->inst_offset = var->inst_offset;
14138 spec = INS_INFO (ins->opcode);
14140 /* printf ("INS: "); mono_print_ins (ins); */
14141 /* Create a store instruction */
14142 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14144 /* Insert it after the instruction */
14145 mono_bblock_insert_after_ins (bb, ins, store_ins);
14147 def_ins = store_ins;
14150 * We can't assign ins->dreg to var->dreg here, since the
14151 * sregs could use it. So set a flag, and do it after
14154 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14155 dest_has_lvreg = TRUE;
14160 if (def_ins && !live_range_start [dreg]) {
14161 live_range_start [dreg] = def_ins;
14162 live_range_start_bb [dreg] = bb;
14165 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14168 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14169 tmp->inst_c1 = dreg;
14170 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* --- Source handling: reuse cached lvregs, fuse loads into membase
 * opcodes, or emit explicit spill loads before the instruction --- */
14177 num_sregs = mono_inst_get_src_registers (ins, sregs);
14178 for (srcindex = 0; srcindex < 3; ++srcindex) {
14179 regtype = spec [MONO_INST_SRC1 + srcindex];
14180 sreg = sregs [srcindex];
14182 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14183 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14184 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14185 MonoInst *use_ins = ins;
14186 MonoInst *load_ins;
14187 guint32 load_opcode;
14189 if (var->opcode == OP_REGVAR) {
14190 sregs [srcindex] = var->dreg;
14191 //mono_inst_set_src_registers (ins, sregs);
14192 live_range_end [sreg] = use_ins;
14193 live_range_end_bb [sreg] = bb;
14195 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14198 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14199 /* var->dreg is a hreg */
14200 tmp->inst_c1 = sreg;
14201 mono_bblock_insert_after_ins (bb, ins, tmp);
14207 g_assert (var->opcode == OP_REGOFFSET);
14209 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14211 g_assert (load_opcode != OP_LOADV_MEMBASE);
14213 if (vreg_to_lvreg [sreg]) {
14214 g_assert (vreg_to_lvreg [sreg] != -1);
14216 /* The variable is already loaded to an lvreg */
14217 if (G_UNLIKELY (cfg->verbose_level > 2))
14218 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14219 sregs [srcindex] = vreg_to_lvreg [sreg];
14220 //mono_inst_set_src_registers (ins, sregs);
14224 /* Try to fuse the load into the instruction */
14225 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14226 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14227 sregs [0] = var->inst_basereg;
14228 //mono_inst_set_src_registers (ins, sregs);
14229 ins->inst_offset = var->inst_offset;
14230 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14231 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14232 sregs [1] = var->inst_basereg;
14233 //mono_inst_set_src_registers (ins, sregs);
14234 ins->inst_offset = var->inst_offset;
14236 if (MONO_IS_REAL_MOVE (ins)) {
14237 ins->opcode = OP_NOP;
14240 //printf ("%d ", srcindex); mono_print_ins (ins);
14242 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14244 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14245 if (var->dreg == prev_dreg) {
14247 * sreg refers to the value loaded by the load
14248 * emitted below, but we need to use ins->dreg
14249 * since it refers to the store emitted earlier.
14253 g_assert (sreg != -1);
14254 vreg_to_lvreg [var->dreg] = sreg;
14255 g_assert (lvregs_len < 1024);
14256 lvregs [lvregs_len ++] = var->dreg;
14260 sregs [srcindex] = sreg;
14261 //mono_inst_set_src_registers (ins, sregs);
14263 #if SIZEOF_REGISTER != 8
14264 if (regtype == 'l') {
/* 32-bit long source: load the two word halves separately */
14265 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14266 mono_bblock_insert_before_ins (bb, ins, load_ins);
14267 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14268 mono_bblock_insert_before_ins (bb, ins, load_ins);
14269 use_ins = load_ins;
14274 #if SIZEOF_REGISTER == 4
14275 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14277 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14278 mono_bblock_insert_before_ins (bb, ins, load_ins);
14279 use_ins = load_ins;
14283 if (var->dreg < orig_next_vreg) {
14284 live_range_end [var->dreg] = use_ins;
14285 live_range_end_bb [var->dreg] = bb;
14288 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14291 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14292 tmp->inst_c1 = var->dreg;
14293 mono_bblock_insert_after_ins (bb, ins, tmp);
14297 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above (dest_has_lvreg): record the
 * lvreg only now that all sregs have been processed */
14299 if (dest_has_lvreg) {
14300 g_assert (ins->dreg != -1);
14301 vreg_to_lvreg [prev_dreg] = ins->dreg;
14302 g_assert (lvregs_len < 1024);
14303 lvregs [lvregs_len ++] = prev_dreg;
14304 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap applied to membase stores */
14308 tmp_reg = ins->dreg;
14309 ins->dreg = ins->sreg2;
14310 ins->sreg2 = tmp_reg;
/* Flush the lvreg cache at calls */
14313 if (MONO_IS_CALL (ins)) {
14314 /* Clear vreg_to_lvreg array */
14315 for (i = 0; i < lvregs_len; i++)
14316 vreg_to_lvreg [lvregs [i]] = 0;
14318 } else if (ins->opcode == OP_NOP) {
14320 MONO_INST_NULLIFY_SREGS (ins);
14323 if (cfg->verbose_level > 2)
14324 mono_print_ins_index (1, ins);
14327 /* Extend the live range based on the liveness info */
14328 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14329 for (i = 0; i < cfg->num_varinfo; i ++) {
14330 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14332 if (vreg_is_volatile (cfg, vi->vreg))
14333 /* The liveness info is incomplete */
14336 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14337 /* Live from at least the first ins of this bb */
14338 live_range_start [vi->vreg] = bb->code;
14339 live_range_start_bb [vi->vreg] = bb;
14342 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14343 /* Live at least until the last ins of this bb */
14344 live_range_end [vi->vreg] = bb->last_ins;
14345 live_range_end_bb [vi->vreg] = bb;
14351 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14353 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14354 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14356 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14357 for (i = 0; i < cfg->num_varinfo; ++i) {
14358 int vreg = MONO_VARINFO (cfg, i)->vreg;
14361 if (live_range_start [vreg]) {
14362 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14364 ins->inst_c1 = vreg;
14365 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14367 if (live_range_end [vreg]) {
14368 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14370 ins->inst_c1 = vreg;
14371 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14372 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14374 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* gsharedvt_locals_var_ins is NULL-ed in the OP_LDADDR handling above when
 * it is actually used; if it is still set here it was never needed */
14380 if (cfg->gsharedvt_locals_var_ins) {
14381 /* Nullify if unused */
14382 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14383 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range arrays were allocated with g_new0/g_new (heap), not from
 * the compile mempool, so they must be freed explicitly */
14386 g_free (live_range_start);
14387 g_free (live_range_end);
14388 g_free (live_range_start_bb);
14389 g_free (live_range_end_bb);
14394 * - use 'iadd' instead of 'int_add'
14395 * - handling ovf opcodes: decompose in method_to_ir.
14396 * - unify iregs/fregs
14397 * -> partly done, the missing parts are:
14398 * - a more complete unification would involve unifying the hregs as well, so
14399 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14400 * would no longer map to the machine hregs, so the code generators would need to
14401 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14402 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14403 * fp/non-fp branches speeds it up by about 15%.
14404 * - use sext/zext opcodes instead of shifts
14406 * - get rid of TEMPLOADs if possible and use vregs instead
14407 * - clean up usage of OP_P/OP_ opcodes
14408 * - cleanup usage of DUMMY_USE
14409 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14411 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14412 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14413 * - make sure handle_stack_args () is called before the branch is emitted
14414 * - when the new IR is done, get rid of all unused stuff
14415 * - COMPARE/BEQ as separate instructions or unify them ?
14416 * - keeping them separate allows specialized compare instructions like
14417 * compare_imm, compare_membase
14418 * - most back ends unify fp compare+branch, fp compare+ceq
14419 * - integrate mono_save_args into inline_method
14420 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14421 * - handle long shift opts on 32 bit platforms somehow: they require
14422 * 3 sregs (2 for arg1 and 1 for arg2)
14423 * - make byref a 'normal' type.
14424 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14425 * variable if needed.
14426 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14427 * like inline_method.
14428 * - remove inlining restrictions
14429 * - fix LNEG and enable cfold of INEG
14430 * - generalize x86 optimizations like ldelema as a peephole optimization
14431 * - add store_mem_imm for amd64
14432 * - optimize the loading of the interruption flag in the managed->native wrappers
14433 * - avoid special handling of OP_NOP in passes
14434 * - move code inserting instructions into one function/macro.
14435 * - try a coalescing phase after liveness analysis
14436 * - add float -> vreg conversion + local optimizations on !x86
14437 * - figure out how to handle decomposed branches during optimizations, ie.
14438 * compare+branch, op_jump_table+op_br etc.
14439 * - promote RuntimeXHandles to vregs
14440 * - vtype cleanups:
14441 * - add a NEW_VARLOADA_VREG macro
14442 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14443 * accessing vtype fields.
14444 * - get rid of I8CONST on 64 bit platforms
14445 * - dealing with the increase in code size due to branches created during opcode
14447 * - use extended basic blocks
14448 * - all parts of the JIT
14449 * - handle_global_vregs () && local regalloc
14450 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14451 * - sources of increase in code size:
14454 * - isinst and castclass
14455 * - lvregs not allocated to global registers even if used multiple times
14456 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14458 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14459 * - add all micro optimizations from the old JIT
14460 * - put tree optimizations into the deadce pass
14461 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14462 * specific function.
14463 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14464 * fcompare + branchCC.
14465 * - create a helper function for allocating a stack slot, taking into account
14466 * MONO_CFG_HAS_SPILLUP.
14468 * - merge the ia64 switch changes.
14469 * - optimize mono_regstate2_alloc_int/float.
14470 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14471 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14472 * parts of the tree could be separated by other instructions, killing the tree
14473 * arguments, or stores killing loads etc. Also, should we fold loads into other
14474 * instructions if the result of the load is used multiple times ?
14475 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14476 * - LAST MERGE: 108395.
14477 * - when returning vtypes in registers, generate IR and append it to the end of the
14478 * last bb instead of doing it in the epilog.
14479 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14487 - When to decompose opcodes:
14488 - earlier: this makes some optimizations hard to implement, since the low level IR
14489 no longer contains the necessary information. But it is easier to do.
14490 - later: harder to implement, enables more optimizations.
14491 - Branches inside bblocks:
14492 - created when decomposing complex opcodes.
14493 - branches to another bblock: harmless, but not tracked by the branch
14494 optimizations, so need to branch to a label at the start of the bblock.
14495 - branches to inside the same bblock: very problematic, trips up the local
14496 reg allocator. Can be fixed by splitting the current bblock, but that is a
14497 complex operation, since some local vregs can become global vregs etc.
14498 - Local/global vregs:
14499 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14500 local register allocator.
14501 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14502 structure, created by mono_create_var (). Assigned to hregs or the stack by
14503 the global register allocator.
14504 - When to do optimizations like alu->alu_imm:
14505 - earlier -> saves work later on since the IR will be smaller/simpler
14506 - later -> can work on more instructions
14507 - Handling of valuetypes:
14508 - When a vtype is pushed on the stack, a new temporary is created, an
14509 instruction computing its address (LDADDR) is emitted and pushed on
14510 the stack. Need to optimize cases when the vtype is used immediately as in
14511 argument passing, stloc etc.
14512 - Instead of the to_end stuff in the old JIT, simply call the function handling
14513 the values on the stack before emitting the last instruction of the bb.
14516 #endif /* DISABLE_JIT */