2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
165 * Instruction metadata
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Exported vreg-allocation wrappers over the static alloc_* helpers.
 * NOTE(review): return types and braces fall outside this extracted view.
 */
/* Allocate a fresh integer vreg in CFG. */
212 mono_alloc_ireg (MonoCompile *cfg)
214 return alloc_ireg (cfg);
/* Allocate a fresh 64-bit (long) vreg in CFG. */
218 mono_alloc_lreg (MonoCompile *cfg)
220 return alloc_lreg (cfg);
/* Allocate a fresh floating-point vreg in CFG. */
224 mono_alloc_freg (MonoCompile *cfg)
226 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg in CFG. */
230 mono_alloc_preg (MonoCompile *cfg)
232 return alloc_preg (cfg);
/* Allocate a destination vreg whose kind matches STACK_TYPE (eval-stack type). */
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification: ref > managed pointer > plain int. */
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
330 type = &type->data.generic_class->container_class->byval_arg;
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of basic block BB to stdout, prefixed by MSG: block number,
 *   incoming edges, outgoing edges, then each instruction in the block.
 *   NOTE(review): loop-variable declarations and the OUT-edge header line
 *   are outside this extracted view.
 */
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* Predecessor edges: block number plus depth-first number (dfn). */
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* Successor edges, same format. */
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Instruction listing via the generic ins printer (-1 = no index). */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   One-time construction of the cached MonoMethodSignature objects used by
 *   JIT helper calls (see the file-scope helper_sig_* variables above).
 *   Signatures are described by whitespace-separated type-name strings
 *   ("ptr" = return type, following entries = parameters).
 */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 *   Debug hook invoked whenever IL fails verification (see the UNVERIFIED /
 *   LOAD_ERROR macros).  When the 'break_on_unverified' debug option is set
 *   it triggers a breakpoint; the body of the 'if' is outside this extracted
 *   view.  MONO_NEVER_INLINE keeps it a distinct frame to break on.
 */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *   Record a MethodAccessException on CFG: METHOD (the caller) is not
 *   allowed to access CIL_METHOD.  Sets the compile-level exception to
 *   MONO_EXCEPTION_MONO_ERROR and fills cfg->error with a message naming
 *   both methods.  Used by the METHOD_ACCESS_FAILURE macro above.
 */
377 static MONO_NEVER_INLINE void
378 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
/* mono_error_set_generic_error copies the message, so the name buffers can be freed. */
384 g_free (method_fname);
385 g_free (cil_method_fname);
/*
 * field_access_failure:
 *   Record a FieldAccessException on CFG: METHOD is not allowed to access
 *   FIELD.  Mirrors method_access_failure () above; used by the
 *   FIELD_ACCESS_FAILURE macro.
 */
388 static MONO_NEVER_INLINE void
389 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
391 char *method_fname = mono_method_full_name (method, TRUE);
392 char *field_fname = mono_field_full_name (field);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
394 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
/* The error message was copied; release the temporary full-name strings. */
395 g_free (method_fname);
396 g_free (field_fname);
/*
 * inline_failure:
 *   Abort an in-progress inline attempt: optionally trace MSG (at
 *   verbose_level >= 2) and mark CFG with MONO_EXCEPTION_INLINE_FAILED so
 *   the caller compiles a normal call instead.  Used by INLINE_FAILURE.
 */
399 static MONO_NEVER_INLINE void
400 inline_failure (MonoCompile *cfg, const char *msg)
402 if (cfg->verbose_level >= 2)
403 printf ("inline failed: %s\n", msg);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 if (cfg->verbose_level > 2) \
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *   Mark the current compilation as failed for gsharedvt (variable-sized
 *   generic sharing): build a diagnostic message naming the method, opcode
 *   and source location, store it in cfg->exception_message, optionally
 *   print it, and set MONO_EXCEPTION_GENERIC_SHARING_FAILED on CFG.
 *   Used by the GSHAREDVT_FAILURE macro above.
 */
415 static MONO_NEVER_INLINE void
416 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
418 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
419 if (cfg->verbose_level >= 2)
420 printf ("%s\n", cfg->exception_message);
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
461 /* Emit conversions so both operands of a binary opcode are of the same type */
463 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
465 MonoInst *arg1 = *arg1_ref;
466 MonoInst *arg2 = *arg2_ref;
469 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
470 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
473 /* Mixing r4/r8 is allowed by the spec */
474 if (arg1->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
478 conv->type = STACK_R8;
482 if (arg2->type == STACK_R4) {
483 int dreg = alloc_freg (cfg);
485 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
486 conv->type = STACK_R8;
492 #if SIZEOF_REGISTER == 8
493 /* FIXME: Need to add many more cases */
494 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
497 int dr = alloc_preg (cfg);
498 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
499 (ins)->sreg2 = widen->dreg;
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
524 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins); \
529 #define ADD_BINCOND(next_block) do { \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, cfg->cbb, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, cfg->cbb, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (cfg->cbb, cmp); \
558 MONO_ADD_INS (cfg->cbb, ins); \
562 * link_bblock: Links two basic blocks
564 * links two basic blocks in the control flow graph, the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * the control flow ends to after 'from'.
/*
 * NOTE(review): several interior lines (verbose-level guard, early-out
 * 'found' returns, array store-back of the grown edge lists) are outside
 * this extracted view.
 */
569 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
571 MonoBasicBlock **newa;
/* Optional edge tracing; NULL cil_code identifies the synthetic entry/exit blocks. */
575 if (from->cil_code) {
577 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
579 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
582 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
584 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from's successor list. */
589 for (i = 0; i < from->out_count; ++i) {
590 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool-allocated, so no free of the old array). */
596 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
597 for (i = 0; i < from->out_count; ++i) {
598 newa [i] = from->out_bb [i];
/* Symmetric handling of to's predecessor list. */
606 for (i = 0; i < to->in_count; ++i) {
607 if (from == to->in_bb [i]) {
613 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
614 for (i = 0; i < to->in_count; ++i) {
615 newa [i] = to->in_bb [i];
/* Public entry point: forwards to the static link_bblock () above. */
624 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
626 link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
/*
 * Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags.
 * Handlers are checked before try bodies so the innermost owner wins.
 */
644 mono_find_block_region (MonoCompile *cfg, int offset)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler body? */
650 for (i = 0; i < header->num_clauses; ++i) {
651 clause = &header->clauses [i];
652 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
653 (offset < (clause->handler_offset)))
654 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
656 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
657 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
658 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
659 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
660 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
662 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: is OFFSET inside a protected (try) range? */
665 for (i = 0; i < header->num_clauses; ++i) {
666 clause = &header->clauses [i];
668 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
669 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the exception clauses of kind TYPE whose protected range
 *   contains IP but not TARGET — i.e. the clauses a branch from IP to
 *   TARGET leaves, whose handlers must therefore run.  Returns them
 *   appended to a GList (res initialization/return are outside this
 *   extracted view).
 */
676 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
678 MonoMethodHeader *header = cfg->header;
679 MonoExceptionClause *clause;
683 for (i = 0; i < header->num_clauses; ++i) {
684 clause = &header->clauses [i];
/* Branch exits this clause: source inside the try range, target outside. */
685 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
686 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
687 if (clause->flags == type)
688 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *   Get or lazily create the stack-pointer variable associated with
 *   exception REGION, cached in the cfg->spvars hash table keyed by the
 *   region token.  The early-return for a cache hit is outside this
 *   extracted view.
 */
695 mono_create_spvar_for_region (MonoCompile *cfg, int region)
699 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Cache miss: create a pointer-sized local for this region. */
703 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
704 /* prevent it from being register allocated */
705 var->flags |= MONO_INST_VOLATILE;
707 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for IL OFFSET;
 * returns NULL when none has been created yet. */
711 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
713 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Get or lazily create the exception-object variable for handler IL
 *   OFFSET, cached in cfg->exvars.  Mirrors mono_create_spvar_for_region ()
 *   but uses an object-typed local; the cache-hit early return is outside
 *   this extracted view.
 */
717 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
721 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Cache miss: create an object local to hold the in-flight exception. */
725 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
726 /* prevent it from being register allocated */
727 var->flags |= MONO_INST_VOLATILE;
729 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
735 * Returns the type used in the eval stack when @type is loaded.
736 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
739 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
743 type = mini_get_underlying_type (type);
744 inst->klass = klass = mono_class_from_mono_type (type);
746 inst->type = STACK_MP;
751 switch (type->type) {
753 inst->type = STACK_INV;
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
804 g_assert (cfg->gshared);
805 if (mini_is_gsharedvt_type (type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
1167 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1172 switch (args->type) {
1182 for (i = 0; i < sig->param_count; ++i) {
1183 switch (args [i].type) {
1187 if (!sig->params [i]->byref)
1191 if (sig->params [i]->byref)
1193 switch (sig->params [i]->type) {
1194 case MONO_TYPE_CLASS:
1195 case MONO_TYPE_STRING:
1196 case MONO_TYPE_OBJECT:
1197 case MONO_TYPE_SZARRAY:
1198 case MONO_TYPE_ARRAY:
1205 if (sig->params [i]->byref)
1207 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1216 /*if (!param_table [args [i].type] [sig->params [i]->type])
1224 * When we need a pointer to the current domain many times in a method, we
1225 * call mono_domain_get() once and we store the result in a local variable.
1226 * This function returns the variable that represents the MonoDomain*.
1228 inline static MonoInst *
1229 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached pointer-sized local on first use. */
1231 if (!cfg->domainvar)
1232 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1233 return cfg->domainvar;
1237 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *   Return the GOT-address variable for AOT compilation, creating it
 *   lazily.  Returns NULL (return line outside this extracted view) when
 *   not AOT-compiling or when the backend does not need a got_var.
 */
1241 mono_get_got_var (MonoCompile *cfg)
1243 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Return the variable holding the runtime generic context (rgctx),
 *   creating it lazily.  Only valid for gshared compilations.
 */
1252 mono_get_vtable_var (MonoCompile *cfg)
1254 g_assert (cfg->gshared);
1256 if (!cfg->rgctx_var) {
1257 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1258 /* force the var to be stack allocated */
1259 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1262 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's eval-stack type (ins->type) back to a MonoType*.
 *   For STACK_MP (the unlabeled case before 'return &ins->klass->this_arg')
 *   and STACK_VTYPE the result depends on ins->klass; unknown stack types
 *   abort via g_error.
 */
1266 type_from_stack_type (MonoInst *ins) {
1267 switch (ins->type) {
1268 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1269 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1270 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1271 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1272 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1274 return &ins->klass->this_arg;
1275 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1276 case STACK_VTYPE: return &ins->klass->byval_arg;
1278 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of type_from_stack_type: classify a MonoType into a STACK_*
 * category after stripping enum/underlying-type wrappers. Reference types
 * collapse to one category; generic insts recurse via valuetype check.
 * (Several case labels/returns are elided in this extract.) */
1283 static G_GNUC_UNUSED int
1284 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1286 t = mono_type_get_underlying_type (t);
1298 case MONO_TYPE_FNPTR:
1300 case MONO_TYPE_CLASS:
1301 case MONO_TYPE_STRING:
1302 case MONO_TYPE_OBJECT:
1303 case MONO_TYPE_SZARRAY:
1304 case MONO_TYPE_ARRAY:
/* R4 handling depends on whether the backend keeps float32 distinct. */
1310 return cfg->r4_stack_type;
1313 case MONO_TYPE_VALUETYPE:
1314 case MONO_TYPE_TYPEDBYREF:
1316 case MONO_TYPE_GENERICINST:
1317 if (mono_type_generic_inst_is_valuetype (t))
1323 g_assert_not_reached ();
/* Map a CIL ldelem.*/stelem.* opcode to the element MonoClass it accesses.
 * The case labels for most opcodes are elided in this extract; only the
 * returned defaults-class per element kind is visible. Unhandled opcodes
 * abort. */
1330 array_access_to_klass (int opcode)
1334 return mono_defaults.byte_class;
1336 return mono_defaults.uint16_class;
1339 return mono_defaults.int_class;
1342 return mono_defaults.sbyte_class;
1345 return mono_defaults.int16_class;
1348 return mono_defaults.int32_class;
1350 return mono_defaults.uint32_class;
1353 return mono_defaults.int64_class;
1356 return mono_defaults.single_class;
1359 return mono_defaults.double_class;
1360 case CEE_LDELEM_REF:
1361 case CEE_STELEM_REF:
1362 return mono_defaults.object_class;
1364 g_assert_not_reached ();
1370 * We try to share variables when possible
/* Return a local variable used to carry stack slot SLOT across basic-block
 * boundaries. Integer-like slots are cached in cfg->intvars keyed by
 * (stack type, slot) so the same temp is reused; other types (fall-through
 * path at the bottom) always get a fresh variable. Slots beyond the IL
 * max_stack (possible due to inlining) also always get a fresh variable. */
1373 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1378 /* inlining can result in deeper stacks */
1379 if (slot >= cfg->header->max_stack)
1380 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack types start at 1, hence the -1. */
1382 pos = ins->type - 1 + slot * STACK_MAX;
1384 switch (ins->type) {
1391 if ((vnum = cfg->intvars [pos]))
1392 return cfg->varinfo [vnum];
1393 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1394 cfg->intvars [pos] = res->inst_c0;
1397 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record image+token for KEY in cfg->token_info_hash so the AOT compiler
 * can later emit a token reference instead of a direct pointer.
 * Skipped when a generic context is active (token alone would be
 * ambiguous) and for table==0 wrapper references. */
1403 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1406 * Don't use this if a generic_context is set, since that means AOT can't
1407 * look up the method using just the image+token.
1408 * table == 0 means this is a reference made from a wrapper.
1410 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1411 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1412 jump_info_token->image = image;
1413 jump_info_token->token = token;
1414 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1419 * This function is called to handle items that are left on the evaluation stack
1420 * at basic block boundaries. What happens is that we save the values to local variables
1421 * and we reload them later when first entering the target basic block (with the
1422 * handle_loaded_temps () function).
1423 * A single joint point will use the same variables (stored in the array bb->out_stack or
1424 * bb->in_stack, if the basic block is before or after the joint point).
1426 * This function needs to be called _before_ emitting the last instruction of
1427 * the bb (i.e. before emitting a branch).
1428 * If the stack merge fails at a join point, cfg->unverifiable is set.
1431 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1434 MonoBasicBlock *bb = cfg->cbb;
1435 MonoBasicBlock *outb;
1436 MonoInst *inst, **locals;
1441 if (cfg->verbose_level > 3)
1442 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: decide which locals carry the outgoing stack. Prefer an
 * existing in_stack of a successor block so both sides agree on vars. */
1443 if (!bb->out_scount) {
1444 bb->out_scount = count;
1445 //printf ("bblock %d has out:", bb->block_num);
1447 for (i = 0; i < bb->out_count; ++i) {
1448 outb = bb->out_bb [i];
1449 /* exception handlers are linked, but they should not be considered for stack args */
1450 if (outb->flags & BB_EXCEPTION_HANDLER)
1452 //printf (" %d", outb->block_num);
1453 if (outb->in_stack) {
1455 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh out_stack vars. */
1461 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1462 for (i = 0; i < count; ++i) {
1464 * try to reuse temps already allocated for this purpouse, if they occupy the same
1465 * stack slot and if they are of the same type.
1466 * This won't cause conflicts since if 'local' is used to
1467 * store one of the values in the in_stack of a bblock, then
1468 * the same variable will be used for the same outgoing stack
1470 * This doesn't work when inlining methods, since the bblocks
1471 * in the inlined methods do not inherit their in_stack from
1472 * the bblock they are inlined to. See bug #58863 for an
1475 if (cfg->inlined_method)
1476 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1478 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate our out_stack as the in_stack of successors;
 * a depth mismatch at a join point marks the method unverifiable. */
1483 for (i = 0; i < bb->out_count; ++i) {
1484 outb = bb->out_bb [i];
1485 /* exception handlers are linked, but they should not be considered for stack args */
1486 if (outb->flags & BB_EXCEPTION_HANDLER)
1488 if (outb->in_scount) {
1489 if (outb->in_scount != bb->out_scount) {
1490 cfg->unverifiable = TRUE;
1493 continue; /* check they are the same locals */
1495 outb->in_scount = count;
1496 outb->in_stack = bb->out_stack;
/* Phase 3: emit the stores of the current stack values into the chosen
 * locals, and replace sp[] entries with the locals themselves. */
1499 locals = bb->out_stack;
1501 for (i = 0; i < count; ++i) {
1502 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1503 inst->cil_code = sp [i]->cil_code;
1504 sp [i] = locals [i];
1505 if (cfg->verbose_level > 3)
1506 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1510 * It is possible that the out bblocks already have in_stack assigned, and
1511 * the in_stacks differ. In this case, we will store to all the different
1518 /* Find a bblock which has a different in_stack */
1520 while (bindex < bb->out_count) {
1521 outb = bb->out_bb [bindex];
1522 /* exception handlers are linked, but they should not be considered for stack args */
1523 if (outb->flags & BB_EXCEPTION_HANDLER) {
1527 if (outb->in_stack != locals) {
1528 for (i = 0; i < count; ++i) {
1529 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1530 inst->cil_code = sp [i]->cil_code;
1531 sp [i] = locals [i];
1532 if (cfg->verbose_level > 3)
1533 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1535 locals = outb->in_stack;
/* Emit an instruction producing a runtime constant described by
 * (patch_type, data). Under AOT a patchable AOTCONST is emitted; in JIT
 * mode the patch is resolved immediately and emitted as a plain PCONST.
 * Returns the MonoInst* holding the constant (return elided here). */
1545 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1549 if (cfg->compile_aot) {
1550 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1556 ji.type = patch_type;
1557 ji.data.target = data;
1558 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
/* Patch resolution is not expected to fail here. */
1559 mono_error_assert_ok (&error);
1561 EMIT_NEW_PCONST (cfg, ins, target);
/* Emit IR that sets intf_bit_reg to nonzero iff the interface bitmap at
 * [base_reg + offset] has the bit for KLASS's interface id set.
 * Three strategies:
 *   - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match icall;
 *   - AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR,
 *     since the iid is only known at load time;
 *   - JIT: fold byte index and mask into immediates at compile time. */
1567 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1569 int ibitmap_reg = alloc_preg (cfg);
1570 #ifdef COMPRESSED_INTERFACE_BITMAP
1572 MonoInst *res, *ins;
1573 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1574 MONO_ADD_INS (cfg->cbb, ins);
1576 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1577 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1578 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1580 int ibitmap_byte_reg = alloc_preg (cfg);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1584 if (cfg->compile_aot) {
1585 int iid_reg = alloc_preg (cfg);
1586 int shifted_iid_reg = alloc_preg (cfg);
1587 int ibitmap_byte_address_reg = alloc_preg (cfg);
1588 int masked_iid_reg = alloc_preg (cfg);
1589 int iid_one_bit_reg = alloc_preg (cfg);
1590 int iid_bit_reg = alloc_preg (cfg);
1591 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1593 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1596 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1600 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1607 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1608 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: interface bitmap lives at MonoClass.interface_bitmap. */
1611 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1613 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1617 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1618 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: interface bitmap lives at MonoVTable.interface_bitmap. */
1621 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1623 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1627 * Emit code which checks whenever the interface id of @klass is smaller than
1628 * than the value given by max_iid_reg.
/* On failure: branch to false_target if given, otherwise throw
 * InvalidCastException. Under AOT the iid is loaded via an AOTCONST
 * since it is unknown at compile time. */
1631 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1632 MonoBasicBlock *false_target)
1634 if (cfg->compile_aot) {
1635 int iid_reg = alloc_preg (cfg);
1636 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1644 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1647 /* Same as above, but obtains max_iid from a vtable */
1649 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1650 MonoBasicBlock *false_target)
1652 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1654 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1655 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1658 /* Same as above, but obtains max_iid from a klass */
1660 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1661 MonoBasicBlock *false_target)
1663 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1665 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1666 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an "is instance of non-interface class" check against the class in
 * klass_reg, using KLASS's supertypes table: load
 * supertypes[klass->idepth - 1] and compare it with KLASS (given either as
 * an IR value klass_ins, an AOT class constant, or an immediate).
 * Branches to true_target on match; the idepth pre-check branches to
 * false_target when the candidate's hierarchy is too shallow. */
1670 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1672 int idepth_reg = alloc_preg (cfg);
1673 int stypes_reg = alloc_preg (cfg);
1674 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before use. */
1676 mono_class_setup_supertypes (klass);
/* Shallow hierarchies always have idepth slots available, so the
 * runtime idepth check is only needed past the default table size. */
1678 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1679 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1686 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1687 } else if (cfg->compile_aot) {
1688 int const_reg = alloc_preg (cfg);
1689 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1690 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with the target class known at
 * compile time (no IR klass value). */
1698 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1700 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: verify
 * the iid range, test the interface bitmap bit, then either branch to
 * true_target (isinst form) or throw InvalidCastException when the bit
 * is clear (castclass form, when true_target is NULL — the surrounding
 * if/else lines are elided in this extract). */
1704 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1706 int intf_reg = alloc_preg (cfg);
1708 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1709 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1714 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1718 * Variant of the above that takes a register to the class, not the vtable.
1721 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1723 int intf_bit_reg = alloc_preg (cfg);
1725 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1726 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Branch form for isinst, throwing form for castclass (selection
 * lines elided in this extract). */
1729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1731 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact-class equality check: compare the class in klass_reg with
 * KLASS (given either as an IR value or as a runtime constant) and throw
 * InvalidCastException on mismatch. */
1735 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1738 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1740 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time class. */
1747 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1749 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare the class in klass_reg with KLASS and branch to TARGET using
 * BRANCH_OP. Under AOT the class pointer must come from a CLASSCONST
 * patch; in JIT mode it can be an immediate. */
1753 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1755 if (cfg->compile_aot) {
1756 int const_reg = alloc_preg (cfg);
1757 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1758 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1766 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check of the class in klass_reg against KLASS, throwing
 * InvalidCastException on failure. Two visible paths:
 *   - array path (first branch, its condition line is elided): compare
 *     ranks, then recursively check the element (cast_class), with special
 *     handling for object/enum element types; for rank-1 SZARRAY also
 *     verify the object is a vector (bounds == NULL);
 *   - non-array path: supertypes-table subtype check, same scheme as
 *     mini_emit_isninst_cast_inst but throwing instead of branching. */
1769 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1772 int rank_reg = alloc_preg (cfg);
1773 int eclass_reg = alloc_preg (cfg);
/* Generic-sharing klass_inst is not supported on the array path. */
1775 g_assert (!klass_inst);
1776 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1778 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1779 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1780 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1781 if (klass->cast_class == mono_defaults.object_class) {
1782 int parent_reg = alloc_preg (cfg);
1783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1784 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1785 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1786 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1787 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1788 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1789 } else if (klass->cast_class == mono_defaults.enum_class) {
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1792 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1794 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1795 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1798 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1799 /* Check that the object is a vector too */
1800 int bounds_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1802 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1803 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check. */
1806 int idepth_reg = alloc_preg (cfg);
1807 int stypes_reg = alloc_preg (cfg);
1808 int stype = alloc_preg (cfg);
1810 mono_class_setup_supertypes (klass);
1812 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1813 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1814 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1815 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1819 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a gshared klass value. */
1824 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1826 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline IR to zero SIZE bytes at [destreg + offset] (only val == 0 is
 * supported, see the assert). Small aligned sizes use a single immediate
 * store; otherwise a value register is materialized and stores are emitted
 * widest-first, using 8-byte stores only when the backend allows unaligned
 * access or alignment permits. Loop/size-decrement lines are elided in
 * this extract. */
1830 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1834 g_assert (val == 0);
1839 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1842 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1845 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1848 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1850 #if SIZEOF_REGISTER == 8
1852 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1858 val_reg = alloc_preg (cfg);
1860 if (SIZEOF_REGISTER == 8)
1861 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1863 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1866 /* This could be optimized further if neccesary */
1868 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1875 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1894 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline IR copying SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset]. Emits load/store pairs widest-first (8-byte only
 * when unaligned access is allowed), falling back to 4/2/1-byte pairs.
 * Size is capped by an assert to avoid unbounded code expansion.
 * Loop/offset-advance lines are elided in this extract. */
1906 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1913 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1914 g_assert (size < 10000);
1917 /* This could be optimized further if neccesary */
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1928 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1930 cur_reg = alloc_preg (cfg);
1931 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1932 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1940 cur_reg = alloc_preg (cfg);
1941 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1942 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1948 cur_reg = alloc_preg (cfg);
1949 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1956 cur_reg = alloc_preg (cfg);
1957 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1958 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emit IR to store sreg1 into the TLS slot identified by tls_key.
 * AOT: the offset is not known, so a TLS_OFFSETCONST is emitted and the
 * register form OP_TLS_SET_REG is used; JIT: the resolved offset is
 * embedded in a plain OP_TLS_SET. */
1966 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1970 if (cfg->compile_aot) {
1971 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1972 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1974 ins->sreg2 = c->dreg;
1975 MONO_ADD_INS (cfg->cbb, ins);
1977 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1979 ins->inst_offset = mini_get_tls_offset (tls_key);
1980 MONO_ADD_INS (cfg->cbb, ins);
1987 * Emit IR to push the current LMF onto the LMF stack.
1990 emit_push_lmf (MonoCompile *cfg)
1993 * Emit IR to push the LMF:
1994 * lmf_addr = <lmf_addr from tls>
1995 * lmf->lmf_addr = lmf_addr
1996 * lmf->prev_lmf = *lmf_addr
1999 int lmf_reg, prev_lmf_reg;
2000 MonoInst *ins, *lmf_ins;
/* Fast path: when the LMF itself lives in TLS, link the frame's lmf_var
 * in front of the current TLS LMF and store it back via emit_tls_set. */
2005 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2006 /* Load current lmf */
2007 lmf_ins = mono_get_lmf_intrinsic (cfg);
2009 MONO_ADD_INS (cfg->cbb, lmf_ins);
2010 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2011 lmf_reg = ins->dreg;
2012 /* Save previous_lmf */
2013 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2015 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
/* Slow path: obtain lmf_addr once (via TLS intrinsic, icall, or the
 * inlined pthread_getspecific sequence below) and cache it in
 * cfg->lmf_addr_var so it can be promoted to a global register. */
2018 * Store lmf_addr in a variable, so it can be allocated to a global register.
2020 if (!cfg->lmf_addr_var)
2021 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2024 ins = mono_get_jit_tls_intrinsic (cfg);
2026 int jit_tls_dreg = ins->dreg;
2028 MONO_ADD_INS (cfg->cbb, ins);
2029 lmf_reg = alloc_preg (cfg);
2030 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2032 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2035 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2037 MONO_ADD_INS (cfg->cbb, lmf_ins);
2040 MonoInst *args [16], *jit_tls_ins, *ins;
2042 /* Inline mono_get_lmf_addr () */
2043 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2045 /* Load mono_jit_tls_id */
2046 if (cfg->compile_aot)
2047 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2049 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2050 /* call pthread_getspecific () */
2051 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2052 /* lmf_addr = &jit_tls->lmf */
2053 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2056 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2060 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2062 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2063 lmf_reg = ins->dreg;
2065 prev_lmf_reg = alloc_preg (cfg);
2066 /* Save previous_lmf */
2067 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2068 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf */
2070 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2077 * Emit IR to pop the current LMF from the LMF stack.
2080 emit_pop_lmf (MonoCompile *cfg)
2082 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2088 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2089 lmf_reg = ins->dreg;
/* Mirror of emit_push_lmf: when LMF lives in TLS, write previous_lmf
 * straight back into the TLS slot. */
2091 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2092 /* Load previous_lmf */
2093 prev_lmf_reg = alloc_preg (cfg);
2094 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2096 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2099 * Emit IR to pop the LMF:
2100 * *(lmf->lmf_addr) = lmf->prev_lmf
2102 /* This could be called before emit_push_lmf () */
2103 if (!cfg->lmf_addr_var)
2104 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2105 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2107 prev_lmf_reg = alloc_preg (cfg);
2108 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2109 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a call to the profiler enter/leave hook FUNC with the current
 * method as its only argument, when ENTER_LEAVE profiling is enabled.
 * Inlined methods are skipped to avoid skewing the profile. */
2114 emit_instrumentation_call (MonoCompile *cfg, void *func)
2116 MonoInst *iargs [1];
2119 * Avoid instrumenting inlined methods since it can
2120 * distort profiling results.
2122 if (cfg->method != cfg->current_method)
2125 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2126 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2127 mono_emit_jit_icall (cfg, func, iargs);
/* Pick the call IR opcode family for a call returning TYPE:
 * VOIDCALL / CALL / LCALL (int64) / RCALL (float32) / FCALL (float64) /
 * VCALL (valuetypes), each with _REG (indirect, calli) and _MEMBASE
 * (virtual) variants selected by the calli/virt flags.
 * Enum valuetypes and generic insts re-dispatch on their underlying type
 * (several case labels and the loop-back lines are elided here). */
2132 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2135 type = mini_get_underlying_type (type);
2136 switch (type->type) {
2137 case MONO_TYPE_VOID:
2138 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2145 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2149 case MONO_TYPE_FNPTR:
2150 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2151 case MONO_TYPE_CLASS:
2152 case MONO_TYPE_STRING:
2153 case MONO_TYPE_OBJECT:
2154 case MONO_TYPE_SZARRAY:
2155 case MONO_TYPE_ARRAY:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2159 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2162 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2164 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2166 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2167 case MONO_TYPE_VALUETYPE:
2168 if (type->data.klass->enumtype) {
2169 type = mono_class_enum_basetype (type->data.klass);
2172 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2173 case MONO_TYPE_TYPEDBYREF:
2174 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2175 case MONO_TYPE_GENERICINST:
2176 type = &type->data.generic_class->container_class->byval_arg;
2179 case MONO_TYPE_MVAR:
/* gsharedvt-style type variables are returned like valuetypes. */
2181 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2183 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2189 * target_type_is_incompatible:
2190 * @cfg: MonoCompile context
2192 * Check that the item @arg on the evaluation stack can be stored
2193 * in the target type (can be a local, or field, etc).
2194 * The cfg arg can be used to check if we need verification or just
2197 * Returns: non-0 value if arg can't be stored on a target.
2200 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2202 MonoType *simple_type;
/* Byref targets accept managed pointers (with a gshared-aware class
 * comparison) and, via the STACK_PTR case, native pointers. */
2205 if (target->byref) {
2206 /* FIXME: check that the pointed to types match */
2207 if (arg->type == STACK_MP) {
2208 MonoClass *base_class = mono_class_from_mono_type (target);
2209 /* This is needed to handle gshared types + ldaddr */
2210 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2211 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2213 if (arg->type == STACK_PTR)
/* Non-byref: dispatch on the underlying target type and compare with
 * the stack category of ARG; valuetypes additionally require matching
 * classes. (Many return statements/case labels are elided here.) */
2218 simple_type = mini_get_underlying_type (target);
2219 switch (simple_type->type) {
2220 case MONO_TYPE_VOID:
2228 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2232 /* STACK_MP is needed when setting pinned locals */
2233 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2238 case MONO_TYPE_FNPTR:
2240 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2241 * in native int. (#688008).
2243 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2246 case MONO_TYPE_CLASS:
2247 case MONO_TYPE_STRING:
2248 case MONO_TYPE_OBJECT:
2249 case MONO_TYPE_SZARRAY:
2250 case MONO_TYPE_ARRAY:
2251 if (arg->type != STACK_OBJ)
2253 /* FIXME: check type compatibility */
2257 if (arg->type != STACK_I8)
2261 if (arg->type != cfg->r4_stack_type)
2265 if (arg->type != STACK_R8)
2268 case MONO_TYPE_VALUETYPE:
2269 if (arg->type != STACK_VTYPE)
2271 klass = mono_class_from_mono_type (simple_type);
2272 if (klass != arg->klass)
2275 case MONO_TYPE_TYPEDBYREF:
2276 if (arg->type != STACK_VTYPE)
2278 klass = mono_class_from_mono_type (simple_type);
2279 if (klass != arg->klass)
2282 case MONO_TYPE_GENERICINST:
2283 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2284 MonoClass *target_class;
2285 if (arg->type != STACK_VTYPE)
2287 klass = mono_class_from_mono_type (simple_type);
2288 target_class = mono_class_from_mono_type (target);
2289 /* The second cases is needed when doing partial sharing */
2290 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2294 if (arg->type != STACK_OBJ)
2296 /* FIXME: check type compatibility */
2300 case MONO_TYPE_MVAR:
/* Open type variables only occur under generic sharing. */
2301 g_assert (cfg->gshared);
2302 if (mini_type_var_is_vt (simple_type)) {
2303 if (arg->type != STACK_VTYPE)
2306 if (arg->type != STACK_OBJ)
2311 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2317 * Prepare arguments for passing to a function call.
2318 * Return a non-zero value if the arguments can't be passed to the given
2320 * The type checks are not yet complete and some conversions may need
2321 * casts on 32 or 64 bit architectures.
2323 * FIXME: implement this using target_type_is_incompatible ()
/* Checks each arg's stack category against the signature parameter type;
 * the hasthis check below validates the receiver (args is offset by one
 * for instance calls — offsetting lines are elided in this extract). */
2326 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2328 MonoType *simple_type;
2332 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2336 for (i = 0; i < sig->param_count; ++i) {
2337 if (sig->params [i]->byref) {
2338 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2342 simple_type = mini_get_underlying_type (sig->params [i]);
2344 switch (simple_type->type) {
2345 case MONO_TYPE_VOID:
2354 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2360 case MONO_TYPE_FNPTR:
2361 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2364 case MONO_TYPE_CLASS:
2365 case MONO_TYPE_STRING:
2366 case MONO_TYPE_OBJECT:
2367 case MONO_TYPE_SZARRAY:
2368 case MONO_TYPE_ARRAY:
2369 if (args [i]->type != STACK_OBJ)
2374 if (args [i]->type != STACK_I8)
2378 if (args [i]->type != cfg->r4_stack_type)
2382 if (args [i]->type != STACK_R8)
2385 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their base type (goto line elided). */
2386 if (simple_type->data.klass->enumtype) {
2387 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2390 if (args [i]->type != STACK_VTYPE)
2393 case MONO_TYPE_TYPEDBYREF:
2394 if (args [i]->type != STACK_VTYPE)
2397 case MONO_TYPE_GENERICINST:
2398 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2401 case MONO_TYPE_MVAR:
2403 if (args [i]->type != STACK_VTYPE)
2407 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart
 * (return statements are elided in this extract). Unknown opcodes abort. */
2415 callvirt_to_call (int opcode)
2418 case OP_CALL_MEMBASE:
2420 case OP_VOIDCALL_MEMBASE:
2422 case OP_FCALL_MEMBASE:
2424 case OP_RCALL_MEMBASE:
2426 case OP_VCALL_MEMBASE:
2428 case OP_LCALL_MEMBASE:
2431 g_assert_not_reached ();
/* Map a *_MEMBASE (virtual) call opcode to its indirect (_REG)
 * counterpart. Unknown opcodes abort. */
2438 callvirt_to_call_reg (int opcode)
2441 case OP_CALL_MEMBASE:
2443 case OP_VOIDCALL_MEMBASE:
2444 return OP_VOIDCALL_REG;
2445 case OP_FCALL_MEMBASE:
2446 return OP_FCALL_REG;
2447 case OP_RCALL_MEMBASE:
2448 return OP_RCALL_REG;
2449 case OP_VCALL_MEMBASE:
2450 return OP_VCALL_REG;
2451 case OP_LCALL_MEMBASE:
2452 return OP_LCALL_REG;
2454 g_assert_not_reached ();
2460 /* Either METHOD or IMT_ARG needs to be set */
/* Materialize the IMT argument (either the hidden imt_arg value or a
 * METHODCONST for METHOD) into a register and attach it to CALL: LLVM
 * records it as call->imt_arg_reg, the JIT back ends bind it to the
 * architecture's dedicated MONO_ARCH_IMT_REG. */
2462 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2466 if (COMPILE_LLVM (cfg)) {
2468 method_reg = alloc_preg (cfg);
2469 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2471 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2472 method_reg = ins->dreg;
2476 call->imt_arg_reg = method_reg;
2478 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant/move, bound directly to the IMT reg. */
2483 method_reg = alloc_preg (cfg);
2484 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2486 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2487 method_reg = ins->dreg;
2490 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2493 static MonoJumpInfo *
2494 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2496 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2500 ji->data.target = target;
2506 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2509 return mono_class_check_context_used (klass);
2515 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2518 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 *
 *   Decide whether a call to CMETHOD must pass the class vtable and/or a
 * method runtime generic context (mrgctx) as a hidden argument. The two
 * outcomes are mutually exclusive (see the g_assert below). Output pointers
 * may be NULL when the caller does not care.
 * NOTE(review): extracted text retains fused original line numbers and has
 * elided lines (branch bodies such as `sharable = TRUE;`); code left untouched.
 */
2524 * check_method_sharing:
2526 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2529 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2531 gboolean pass_vtable = FALSE;
2532 gboolean pass_mrgctx = FALSE;
/* static methods and valuetype methods on generic classes may need the vtable */
2534 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2535 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2536 gboolean sharable = FALSE;
2538 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2542 * Pass vtable iff target method might
2543 * be shared, which means that sharing
2544 * is enabled for its class and its
2545 * context is sharable (and it's not a
2548 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst != NULL) get an mrgctx instead of a vtable */
2552 if (mini_method_get_context (cmethod) &&
2553 mini_method_get_context (cmethod)->method_inst) {
2554 g_assert (!pass_vtable);
2556 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
/* gsharedvt signatures also force an mrgctx */
2559 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2564 if (out_pass_vtable)
2565 *out_pass_vtable = pass_vtable;
2566 if (out_pass_mrgctx)
2567 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Allocate and partially initialize a MonoCallInst for a call with
 * signature SIG and arguments ARGS. Handles tail calls, valuetype returns
 * (via vret_var / OP_OUTARG_VTRETADDR), soft-float argument conversion, and
 * dispatches argument marshalling to the backend (LLVM or mono_arch).
 * NOTE(review): extracted text retains fused original line numbers and elided
 * lines (declarations, braces, some #ifdef bodies); code left untouched.
 */
2570 inline static MonoCallInst *
2571 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2572 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2576 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify profiler of method leave, then emit OP_TAILCALL */
2584 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2586 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2588 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2591 call->signature = sig;
2592 call->rgctx_reg = rgctx;
2593 sig_ret = mini_get_underlying_type (sig->ret);
2595 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return, first variant: reuse the caller-provided vret_addr */
2598 if (mini_type_is_vtype (sig_ret)) {
2599 call->vret_var = cfg->vret_addr;
2600 //g_assert_not_reached ();
/* vtype return, second variant: allocate a temp and take its address */
2602 } else if (mini_type_is_vtype (sig_ret)) {
2603 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2606 temp->backend.is_pinvoke = sig->pinvoke;
2609 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2610 * address of return value to increase optimization opportunities.
2611 * Before vtype decomposition, the dreg of the call ins itself represents the
2612 * fact the call modifies the return value. After decomposition, the call will
2613 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2614 * will be transformed into an LDADDR.
2616 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2617 loada->dreg = alloc_preg (cfg);
2618 loada->inst_p0 = temp;
2619 /* We reference the call too since call->dreg could change during optimization */
2620 loada->inst_p1 = call;
2621 MONO_ADD_INS (cfg->cbb, loada);
2623 call->inst.dreg = temp->dreg;
2625 call->vret_var = loada;
/* scalar non-void return: allocate a destination vreg of the right stack type */
2626 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2627 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2629 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2630 if (COMPILE_SOFT_FLOAT (cfg)) {
2632 * If the call has a float argument, we would need to do an r8->r4 conversion using
2633 * an icall, but that cannot be done during the call sequence since it would clobber
2634 * the call registers + the stack. So we do it before emitting the call.
2636 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2638 MonoInst *in = call->args [i];
2640 if (i >= sig->hasthis)
2641 t = sig->params [i - sig->hasthis];
2643 t = &mono_defaults.int_class->byval_arg;
2644 t = mono_type_get_underlying_type (t);
2646 if (!t->byref && t->type == MONO_TYPE_R4) {
2647 MonoInst *iargs [1];
2651 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2653 /* The result will be in an int vreg */
2654 call->args [i] = conv;
2660 call->need_unbox_trampoline = unbox_trampoline;
/* delegate argument marshalling to the active backend */
2663 if (COMPILE_LLVM (cfg))
2664 mono_llvm_emit_call (cfg, call);
2666 mono_arch_emit_call (cfg, call);
2668 mono_arch_emit_call (cfg, call);
/* track outgoing parameter area size and mark the method as containing calls */
2671 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2672 cfg->flags |= MONO_CFG_HAS_CALLS;
2678 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2680 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2681 cfg->uses_rgctx_reg = TRUE;
2682 call->rgctx_reg = TRUE;
2684 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and an rgctx argument. When pinvoke callconv checking is
 * enabled for a managed-to-native wrapper, saves the stack pointer before
 * the call and verifies it afterwards, throwing ExecutionEngineException on
 * mismatch (a miscompiled/misdeclared calling convention).
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines; code left untouched.
 */
2688 inline static MonoInst*
2689 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2694 gboolean check_sp = FALSE;
/* only check the SP inside pinvoke wrappers, when the feature is enabled */
2696 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2697 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2699 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into a dedicated vreg before emitting the call */
2704 rgctx_reg = mono_alloc_preg (cfg);
2705 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* capture the pre-call stack pointer into stack_inbalance_var */
2709 if (!cfg->stack_inbalance_var)
2710 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2712 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2713 ins->dreg = cfg->stack_inbalance_var->dreg;
2714 MONO_ADD_INS (cfg->cbb, ins);
2717 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2719 call->inst.sreg1 = addr->dreg;
2722 emit_imt_argument (cfg, call, NULL, imt_arg);
2724 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* read the post-call stack pointer */
2729 sp_reg = mono_alloc_preg (cfg);
2731 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2733 MONO_ADD_INS (cfg->cbb, ins);
2735 /* Restore the stack so we don't crash when throwing the exception */
2736 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2737 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2738 MONO_ADD_INS (cfg->cbb, ins);
/* compare saved vs current SP; mismatch means broken native callconv */
2740 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2741 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2745 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2747 return (MonoInst*)call;
2751 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2754 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2756 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Central helper to emit a (possibly virtual, possibly remoted, possibly
 * generic-shared) managed method call. Chooses between direct calls,
 * vtable/IMT-slot dispatch, delegate Invoke fast path, llvm-only virtual
 * calls and remoting wrappers, then adds the call instruction to the
 * current basic block.
 * NOTE(review): extracted text retains fused original line numbers and many
 * elided lines; code left untouched.
 */
2759 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2760 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2762 #ifndef DISABLE_REMOTING
2763 gboolean might_be_remote = FALSE;
/* a non-NULL `this` pointer marks the call as virtual dispatchable */
2765 gboolean virtual_ = this_ins != NULL;
2766 gboolean enable_for_aot = TRUE;
2769 MonoInst *call_target = NULL;
2771 gboolean need_unbox_trampoline;
2774 sig = mono_method_signature (method);
2776 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2777 g_assert_not_reached ();
2780 rgctx_reg = mono_alloc_preg (cfg);
2781 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String .ctors are declared void but actually return the string */
2784 if (method->string_ctor) {
2785 /* Create the real signature */
2786 /* FIXME: Cache these */
2787 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2788 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2793 context_used = mini_method_check_context_used (cfg, method);
2795 #ifndef DISABLE_REMOTING
2796 might_be_remote = this_ins && sig->hasthis &&
2797 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2798 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
/* gshared + remoting: fetch the remoting-invoke address from the rgctx */
2800 if (might_be_remote && context_used) {
2803 g_assert (cfg->gshared);
2805 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2807 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2811 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2812 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2814 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2816 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2818 #ifndef DISABLE_REMOTING
2819 if (might_be_remote)
2820 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2823 call->method = method;
2824 call->inst.flags |= MONO_INST_HAS_METHOD;
2825 call->inst.inst_left = this_ins;
2826 call->tail_call = tail;
2829 int vtable_reg, slot_reg, this_reg;
2832 this_reg = this_ins->dreg;
/* fast path for MulticastDelegate.Invoke: call through delegate->invoke_impl */
2834 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2835 MonoInst *dummy_use;
2837 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2839 /* Make a call to delegate->invoke_impl */
2840 call->inst.inst_basereg = this_reg;
2841 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2842 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2844 /* We must emit a dummy use here because the delegate trampoline will
2845 replace the 'this' argument with the delegate target making this activation
2846 no longer a root for the delegate.
2847 This is an issue for delegates that target collectible code such as dynamic
2848 methods of GC'able assemblies.
2850 For a test case look into #667921.
2852 FIXME: a dummy use is not the best way to do it as the local register allocator
2853 will put it on a caller save register and spil it around the call.
2854 Ideally, we would either put it on a callee save register or only do the store part.
2856 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2858 return (MonoInst*)call;
/* non-virtual (or effectively-final) methods: direct call after null check */
2861 if ((!cfg->compile_aot || enable_for_aot) &&
2862 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2863 (MONO_METHOD_IS_FINAL (method) &&
2864 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2865 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2867 * the method is not virtual, we just need to ensure this is not null
2868 * and then we can call the method directly.
2870 #ifndef DISABLE_REMOTING
2871 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2873 * The check above ensures method is not gshared, this is needed since
2874 * gshared methods can't have wrappers.
2876 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2880 if (!method->string_ctor)
2881 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2883 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2884 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2886 * the method is virtual, but we can statically dispatch since either
2887 * it's class or the method itself are sealed.
2888 * But first we need to ensure it's not a null reference.
2890 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2892 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* explicit call target supplied: indirect call through a register */
2893 } else if (call_target) {
2894 vtable_reg = alloc_preg (cfg);
2895 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2897 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2898 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!' is logical NOT — this evaluates to `flags &= 0` and
 * clears ALL instruction flags, not just MONO_INST_HAS_METHOD. Almost
 * certainly intended: call->inst.flags &= ~MONO_INST_HAS_METHOD; confirm. */
2899 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* general virtual dispatch: load vtable, pick IMT slot or vtable slot */
2901 vtable_reg = alloc_preg (cfg);
2902 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2903 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2904 guint32 imt_slot = mono_method_get_imt_slot (method);
2905 emit_imt_argument (cfg, call, call->method, imt_arg);
2906 slot_reg = vtable_reg;
/* IMT slots live at negative offsets from the vtable pointer */
2907 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2909 slot_reg = vtable_reg;
2910 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2911 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2913 g_assert (mono_method_signature (method)->generic_param_count);
2914 emit_imt_argument (cfg, call, call->method, imt_arg);
2918 call->inst.sreg1 = slot_reg;
2919 call->inst.inst_offset = offset;
2920 call->is_virtual = TRUE;
2924 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2927 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2929 return (MonoInst*)call;
2933 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2935 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2939 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2946 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2949 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2951 return (MonoInst*)call;
2955 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2957 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2961 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2965 * mono_emit_abs_call:
2967 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2969 inline static MonoInst*
2970 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2971 MonoMethodSignature *sig, MonoInst **args)
2973 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2977 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2980 if (cfg->abs_patches == NULL)
2981 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2982 g_hash_table_insert (cfg->abs_patches, ji, ji);
2983 ins = mono_emit_native_call (cfg, ji, sig, args);
2984 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2988 static MonoMethodSignature*
2989 sig_to_rgctx_sig (MonoMethodSignature *sig)
2991 // FIXME: memory allocation
2992 MonoMethodSignature *res;
2995 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2996 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2997 res->param_count = sig->param_count + 1;
2998 for (i = 0; i < sig->param_count; ++i)
2999 res->params [i] = sig->params [i];
3000 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3004 /* Make an indirect call to FSIG passing an additional argument */
3006 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3008 MonoMethodSignature *csig;
3009 MonoInst *args_buf [16];
3011 int i, pindex, tmp_reg;
3013 /* Make a call with an rgctx/extra arg */
3014 if (fsig->param_count + 2 < 16)
3017 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3020 args [pindex ++] = orig_args [0];
3021 for (i = 0; i < fsig->param_count; ++i)
3022 args [pindex ++] = orig_args [fsig->hasthis + i];
3023 tmp_reg = alloc_preg (cfg);
3024 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3025 csig = sig_to_rgctx_sig (fsig);
3026 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3029 /* Emit an indirect call to the function descriptor ADDR */
3031 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3033 int addr_reg, arg_reg;
3034 MonoInst *call_target;
3036 g_assert (cfg->llvm_only);
3039 * addr points to a <addr, arg> pair, load both of them, and
3040 * make a call to addr, passing arg as an extra arg.
3042 addr_reg = alloc_preg (cfg);
3043 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3044 arg_reg = alloc_preg (cfg);
3045 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3047 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
3051 direct_icalls_enabled (MonoCompile *cfg)
3053 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3055 if (cfg->compile_llvm)
3058 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
3064 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3067 * Call the jit icall without a wrapper if possible.
3068 * The wrapper is needed for the following reasons:
3069 * - to handle exceptions thrown using mono_raise_exceptions () from the
3070 * icall function. The EH code needs the lmf frame pushed by the
3071 * wrapper to be able to unwind back to managed code.
3072 * - to be able to do stack walks for asynchronously suspended
3073 * threads when debugging.
3075 if (info->no_raise && direct_icalls_enabled (cfg)) {
3079 if (!info->wrapper_method) {
3080 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3081 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3083 mono_memory_barrier ();
3087 * Inline the wrapper method, which is basically a call to the C icall, and
3088 * an exception check.
3090 costs = inline_method (cfg, info->wrapper_method, NULL,
3091 args, NULL, cfg->real_offset, TRUE);
3092 g_assert (costs > 0);
3093 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3097 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
3102 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3104 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3105 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3109 * Native code might return non register sized integers
3110 * without initializing the upper bits.
3112 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3113 case OP_LOADI1_MEMBASE:
3114 widen_op = OP_ICONV_TO_I1;
3116 case OP_LOADU1_MEMBASE:
3117 widen_op = OP_ICONV_TO_U1;
3119 case OP_LOADI2_MEMBASE:
3120 widen_op = OP_ICONV_TO_I2;
3122 case OP_LOADU2_MEMBASE:
3123 widen_op = OP_ICONV_TO_U2;
3129 if (widen_op != -1) {
3130 int dreg = alloc_preg (cfg);
3133 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3134 widen->type = ins->type;
3144 get_memcpy_method (void)
3146 static MonoMethod *memcpy_method = NULL;
3147 if (!memcpy_method) {
3148 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3150 g_error ("Old corlib found. Install a new one");
3152 return memcpy_method;
3156 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3158 MonoClassField *field;
3159 gpointer iter = NULL;
3161 while ((field = mono_class_get_fields (klass, &iter))) {
3164 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3166 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3167 if (mini_type_is_reference (mono_field_get_type (field))) {
3168 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3169 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3171 MonoClass *field_class = mono_class_from_mono_type (field->type);
3172 if (field_class->has_references)
3173 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Picks, in
 * order of preference: a backend card-table barrier opcode, inline
 * card-table marking code, or a call to the generic managed write barrier.
 * No-op when write barriers are disabled for this compile.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines; code left untouched.
 */
3179 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3181 int card_table_shift_bits;
3182 gpointer card_table_mask;
3184 MonoInst *dummy_use;
3185 int nursery_shift_bits;
3186 size_t nursery_size;
3188 if (!cfg->gen_write_barriers)
3191 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3193 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* fastest path: dedicated backend opcode for card-table barriers */
3195 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3198 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3199 wbarrier->sreg1 = ptr->dreg;
3200 wbarrier->sreg2 = value->dreg;
3201 MONO_ADD_INS (cfg->cbb, wbarrier);
/* inline card marking: card index = ptr >> shift (optionally masked) */
3202 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3203 int offset_reg = alloc_preg (cfg);
3207 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3208 if (card_table_mask)
3209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3211 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3212 * IMM's larger than 32bits.
3214 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3215 card_reg = ins->dreg;
3217 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* mark the card dirty with a one-byte store */
3218 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* fallback: call the GC's managed write barrier method */
3220 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3221 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3224 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 * KLASS of SIZE bytes with ALIGN alignment (iargs[0] = dest, iargs[1] =
 * src). Falls back to the mono_gc_wbarrier_value_copy_bitmap icall for
 * larger sizes. Returns (presumably) whether the copy was emitted —
 * return statements are elided in this extraction; confirm in the source.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines; code left untouched.
 */
3228 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3230 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3231 unsigned need_wb = 0;
3236 /*types with references can't have alignment smaller than sizeof(void*) */
3237 if (align < SIZEOF_VOID_P)
3240 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3241 if (size > 32 * SIZEOF_VOID_P)
3244 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3246 /* We don't unroll more than 5 stores to avoid code bloat. */
3247 if (size > 5 * SIZEOF_VOID_P) {
3248 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3249 size += (SIZEOF_VOID_P - 1);
3250 size &= ~(SIZEOF_VOID_P - 1);
3252 EMIT_NEW_ICONST (cfg, iargs [2], size);
3253 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3254 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3258 destreg = iargs [0]->dreg;
3259 srcreg = iargs [1]->dreg;
3262 dest_ptr_reg = alloc_preg (cfg);
3263 tmp_reg = alloc_preg (cfg);
/* running destination pointer, advanced one word per iteration */
3266 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3268 while (size >= SIZEOF_VOID_P) {
3269 MonoInst *load_inst;
3270 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3271 load_inst->dreg = tmp_reg;
3272 load_inst->inst_basereg = srcreg;
3273 load_inst->inst_offset = offset;
3274 MONO_ADD_INS (cfg->cbb, load_inst);
3276 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for slots flagged in the bitmap (elided condition) */
3279 emit_write_barrier (cfg, iargs [0], load_inst);
3281 offset += SIZEOF_VOID_P;
3282 size -= SIZEOF_VOID_P;
3285 /*tmp += sizeof (void*)*/
3286 if (size >= SIZEOF_VOID_P) {
3287 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3288 MONO_ADD_INS (cfg->cbb, iargs [0]);
3292 /* Those cannot be references since size < sizeof (void*) */
/* copy the sub-word tail with progressively narrower loads/stores */
3294 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3301 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3308 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of KLASS from *src->dreg to *dest->dreg.
 * Chooses between an unrolled barrier-aware copy, the mono_value_copy /
 * mono_gsharedvt_value_copy icalls (when write barriers are needed), an
 * inline memcpy for small sizes, or a call to the managed memcpy helper.
 * NATIVE selects the native (marshalled) layout/size.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines; code left untouched.
 */
3318 * Emit code to copy a valuetype of type @klass whose address is stored in
3319 * @src->dreg to memory whose address is stored at @dest->dreg.
3322 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3324 MonoInst *iargs [4];
3327 MonoMethod *memcpy_method;
3328 MonoInst *size_ins = NULL;
3329 MonoInst *memcpy_ins = NULL;
3333 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3336 * This check breaks with spilled vars... need to handle it during verification anyway.
3337 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known at runtime, via rgctx */
3340 if (mini_is_gsharedvt_klass (klass)) {
3342 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3343 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3347 n = mono_class_native_size (klass, &align);
3349 n = mono_class_value_size (klass, &align);
3351 /* if native is true there should be no references in the struct */
3352 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3353 /* Avoid barriers when storing to the stack */
3354 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3355 (dest->opcode == OP_LDADDR))) {
3361 context_used = mini_class_check_context_used (cfg, klass);
3363 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3364 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3366 } else if (context_used) {
3367 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3369 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3370 if (!cfg->compile_aot)
3371 mono_class_compute_gc_descriptor (klass);
3375 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3377 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no barriers needed: small copies are emitted inline */
3382 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3383 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3384 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3389 iargs [2] = size_ins;
3391 EMIT_NEW_ICONST (cfg, iargs [2], n);
3393 memcpy_method = get_memcpy_method ();
/* gsharedvt goes through the runtime-resolved memcpy, else the corlib helper */
3395 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3397 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3402 get_memset_method (void)
3404 static MonoMethod *memset_method = NULL;
3405 if (!memset_method) {
3406 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3408 g_error ("Old corlib found. Install a new one");
3410 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at *dest->dreg
 * (CIL initobj). gsharedvt classes call a runtime-resolved bzero helper
 * with a runtime size; otherwise small sizes are memset inline and large
 * ones call the managed memset helper with a constant size.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines; code left untouched.
 */
3414 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3416 MonoInst *iargs [3];
3419 MonoMethod *memset_method;
3420 MonoInst *size_ins = NULL;
3421 MonoInst *bzero_ins = NULL;
3422 static MonoMethod *bzero_method;
3424 /* FIXME: Optimize this for the case when dest is an LDADDR */
3425 mono_class_init (klass);
/* gsharedvt: size and bzero target resolved at runtime through the rgctx */
3426 if (mini_is_gsharedvt_klass (klass)) {
3427 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3428 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3430 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3431 g_assert (bzero_method);
3433 iargs [1] = size_ins;
3434 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3438 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3440 n = mono_class_value_size (klass, &align);
/* small constant sizes: inline memset */
3442 if (n <= sizeof (gpointer) * 8) {
3443 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3446 memset_method = get_memset_method ();
3448 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3449 EMIT_NEW_ICONST (cfg, iargs [2], n);
3450 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR which produces the runtime generic context for METHOD:
 * the `this` object's vtable for instance methods, the vtable variable for
 * static/valuetype methods, or the mrgctx variable for shared generic
 * methods (possibly dereferencing mrgctx->class_vtable). Requires
 * cfg->gshared.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines (several returns/branches); code left untouched.
 */
3457 * Emit IR to return either the this pointer for instance method,
3458 * or the mrgctx for static methods.
3461 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3463 MonoInst *this_ins = NULL;
3465 g_assert (cfg->gshared);
/* instance methods on reference types: the context comes from `this` */
3467 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3468 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3469 !method->klass->valuetype)
3470 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* shared generic method: use the mrgctx variable */
3472 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3473 MonoInst *mrgctx_loc, *mrgctx_var;
3475 g_assert (!this_ins);
3476 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3478 mrgctx_loc = mono_get_vtable_var (cfg);
3479 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* static or valuetype method: use the vtable variable */
3482 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3483 MonoInst *vtable_loc, *vtable_var;
3485 g_assert (!this_ins);
3487 vtable_loc = mono_get_vtable_var (cfg);
3488 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* the variable actually holds an mrgctx: load its class_vtable field */
3490 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3491 MonoInst *mrgctx_var = vtable_var;
3494 vtable_reg = alloc_preg (cfg);
3495 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3496 vtable_var->type = STACK_PTR;
/* default: load the vtable out of the `this` object */
3504 vtable_reg = alloc_preg (cfg);
3505 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3510 static MonoJumpInfoRgctxEntry *
3511 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3513 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3514 res->method = method;
3515 res->in_mrgctx = in_mrgctx;
3516 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3517 res->data->type = patch_type;
3518 res->data->data.target = patch_data;
3519 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch an rgctx slot value: walks the chained rgctx
 * arrays to the right depth, loads the slot, and on any NULL (missing
 * table or unfilled slot) falls back to the mono_fill_{method,class}_rgctx
 * icalls which allocate/fill the slot. The llvm-only path always calls the
 * icalls since the slot index is not a compile-time constant there.
 * NOTE(review): extracted text retains fused original line numbers and
 * elided lines (declarations, some branches); code left untouched.
 */
3524 static inline MonoInst*
3525 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3527 MonoInst *args [16];
/* llvm-only: no inline fastpath possible */
3530 // FIXME: No fastpath since the slot is not a compile time constant
3532 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3533 if (entry->in_mrgctx)
3534 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3536 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3540 * FIXME: This can be called during decompose, which is a problem since it creates
3542 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3544 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3546 MonoBasicBlock *is_null_bb, *end_bb;
3547 MonoInst *res, *ins, *call;
3550 slot = mini_get_rgctx_entry_slot (entry);
/* decode slot into (mrgctx?, index) and find the array depth holding it */
3552 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3553 index = MONO_RGCTX_SLOT_INDEX (slot);
3555 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3556 for (depth = 0; ; ++depth) {
3557 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3559 if (index < size - 1)
3564 NEW_BBLOCK (cfg, end_bb);
3565 NEW_BBLOCK (cfg, is_null_bb);
3568 rgctx_reg = rgctx->dreg;
3570 rgctx_reg = alloc_preg (cfg);
3572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3573 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3574 NEW_BBLOCK (cfg, is_null_bb);
3576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* walk the chained arrays down to the slot's depth */
3580 for (i = 0; i < depth; ++i) {
3581 int array_reg = alloc_preg (cfg);
3583 /* load ptr to next array */
3584 if (mrgctx && i == 0)
3585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3588 rgctx_reg = array_reg;
3589 /* is the ptr null? */
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3591 /* if yes, jump to actual trampoline */
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* load the slot value (+1 skips the next-array pointer at offset 0) */
3596 val_reg = alloc_preg (cfg);
3597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3598 /* is the slot null? */
3599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3600 /* if yes, jump to actual trampoline */
3601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fastpath hit: move the slot value into the result register */
3604 res_reg = alloc_preg (cfg);
3605 MONO_INST_NEW (cfg, ins, OP_MOVE);
3606 ins->dreg = res_reg;
3607 ins->sreg1 = val_reg;
3608 MONO_ADD_INS (cfg->cbb, ins);
3610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slowpath: call the fill icall and move its result into res_reg */
3613 MONO_START_BB (cfg, is_null_bb);
3615 EMIT_NEW_ICONST (cfg, args [1], index);
3617 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3619 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3620 MONO_INST_NEW (cfg, ins, OP_MOVE);
3621 ins->dreg = res_reg;
3622 ins->sreg1 = call->dreg;
3623 MONO_ADD_INS (cfg->cbb, ins);
3624 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3626 MONO_START_BB (cfg, end_bb);
/*
 * emit_rgctx_fetch:
 *
 *   Emit IR to load the value of the rgctx entry ENTRY from the rgctx RGCTX,
 * either inline (emit_rgctx_fetch_inline) or through the lazy-fetch
 * trampoline via an abs call.
 *
 * NOTE(review): the condition choosing between the two return paths is not
 * visible in this extraction — confirm against upstream.
 */
3635 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3638 static inline MonoInst*
3639 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3642 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3644 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx value of type RGCTX_TYPE for KLASS: build a
 * CLASS rgctx entry, load the current method's rgctx, and fetch the entry.
 */
3648 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3649 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3651 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3652 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3654 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the rgctx value of type RGCTX_TYPE for the signature SIG
 * (SIGNATURE rgctx entry), fetched through the current method's rgctx.
 */
3658 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3659 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3661 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3662 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3664 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the rgctx value of type RGCTX_TYPE for a gsharedvt call
 * described by (SIG, CMETHOD). The (sig, method) pair is packaged into a
 * mempool-allocated MonoJumpInfoGSharedVtCall and fetched as a
 * GSHAREDVT_CALL rgctx entry.
 */
3668 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3669 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3671 MonoJumpInfoGSharedVtCall *call_info;
3672 MonoJumpInfoRgctxEntry *entry;
/* Allocated from the cfg mempool: lives as long as the compilation, no explicit free. */
3675 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3676 call_info->sig = sig;
3677 call_info->method = cmethod;
3679 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3680 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3682 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_virt_method:
 *
 *   Emit IR to load rgctx data of type RGCTX_TYPE for the virtual method
 * VIRT_METHOD called on a receiver of type KLASS. The (klass, method) pair is
 * packaged into a mempool-allocated MonoJumpInfoVirtMethod and fetched as a
 * VIRT_METHOD rgctx entry.
 */
3686 * emit_get_rgctx_virt_method:
3688 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3691 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3692 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3694 MonoJumpInfoVirtMethod *info;
3695 MonoJumpInfoRgctxEntry *entry;
3698 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3699 info->klass = klass;
3700 info->method = virt_method;
3702 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3703 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3705 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MonoGSharedVtMethodRuntimeInfo for CMETHOD, fetched
 * as a GSHAREDVT_METHOD rgctx entry with info type METHOD_GSHAREDVT_INFO.
 */
3709 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3710 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3712 MonoJumpInfoRgctxEntry *entry;
3715 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3716 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3718 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If CONTEXT_USED is 0
 * the value is a compile-time constant (method const / method-rgctx const);
 * otherwise it is fetched from the rgctx at run time.
 */
3722 * emit_get_rgctx_method:
3724 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3725 * normal constants, else emit a load from the rgctx.
3728 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3729 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in effect: the method is fully known, emit a constant. */
3731 if (!context_used) {
3734 switch (rgctx_type) {
3735 case MONO_RGCTX_INFO_METHOD:
3736 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3738 case MONO_RGCTX_INFO_METHOD_RGCTX:
3739 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other rgctx info type is not expected on the constant path. */
3742 g_assert_not_reached ();
/* Shared code: fetch the value through the rgctx at run time. */
3745 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3746 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3748 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx value of type RGCTX_TYPE for FIELD (FIELD rgctx
 * entry), fetched through the current method's rgctx.
 */
3753 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3754 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3756 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3757 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3759 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the current method's
 * gsharedvt info template table, registering a new entry if it is not present
 * yet. The entries array grows geometrically (doubling, starting at 16) from
 * the cfg mempool.
 */
3763 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3765 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3766 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing entry; LOCAL_OFFSET entries are never shared. */
3771 for (i = 0; i < info->num_entries; ++i) {
3772 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3774 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Table full: double the capacity (mempool allocation, old array is simply abandoned). */
3778 if (info->num_entries == info->count_entries) {
3779 MonoRuntimeGenericContextInfoTemplate *new_entries;
3780 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3782 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3784 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3785 info->entries = new_entries;
3786 info->count_entries = new_count_entries;
/* Append the new template entry and return its index. */
3789 idx = info->num_entries;
3790 template_ = &info->entries [idx];
3791 template_->info_type = rgctx_type;
3792 template_->data = data;
3794 info->num_entries ++;
/*
 * emit_get_gsharedvt_info:
 *
 *   Like emit_get_rgctx_.., but loads the value out of the method's gsharedvt
 * info variable (a MonoGSharedVtMethodRuntimeInfo) instead of going through
 * an rgctx fetch trampoline: resolve the slot index at compile time, then
 * emit a single load of entries [idx].
 */
3800 * emit_get_gsharedvt_info:
3802 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3805 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3810 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3811 /* Load info->entries [idx] */
3812 dreg = alloc_preg (cfg);
3813 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3819 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3821 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / class initializer of KLASS if it has not
 * run yet. The vtable argument is either fetched from the rgctx (shared code)
 * or emitted as a constant. Two code shapes are used:
 *   - a single OP_GENERIC_CLASS_INIT opcode when the backend supports it
 *     (hides the call, does not break basic blocks), or
 *   - an explicit inline check of the MonoVTable 'initialized' bitfield with
 *     a fallback call to the mono_generic_class_init icall.
 */
3825 * On return the caller must check @klass for load errors.
3828 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3830 MonoInst *vtable_arg;
3833 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must be fetched from the rgctx. */
3836 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3837 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable can be looked up / emitted as a constant. */
3839 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3843 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3846 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3850 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3851 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3853 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3854 ins->sreg1 = vtable_arg->dreg;
3855 MONO_ADD_INS (cfg->cbb, ins);
/* Inline path: test the 'initialized' bit in the vtable's bitfield. The
 * offset/mask are resolved once and cached in function-local statics. */
3857 static int byte_offset = -1;
3858 static guint8 bitmask;
3859 int bits_reg, inited_reg;
3860 MonoBasicBlock *inited_bb;
3861 MonoInst *args [16];
3863 if (byte_offset < 0)
3864 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3866 bits_reg = alloc_ireg (cfg);
3867 inited_reg = alloc_ireg (cfg);
3869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3872 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall. */
3874 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3877 args [0] = vtable_arg;
3878 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3880 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support, but only when
 * sequence points are enabled and METHOD is the method actually being
 * compiled (not an inlined callee). NONEMPTY_STACK flags seq points where the
 * evaluation stack is not empty.
 */
3885 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3889 if (cfg->gen_seq_points && cfg->method == method) {
3890 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3892 ins->flags |= MONO_INST_NONEMPTY_STACK;
3893 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   Debug aid (--debug=casts): before a cast, record the source class (read
 * from OBJ_REG's vtable) and the target KLASS into the per-thread
 * MonoJitTlsData (class_cast_from / class_cast_to) so a failed cast can
 * produce a detailed message. NULL_CHECK guards the whole thing with an
 * obj == NULL skip. No-op unless better_cast_details is enabled.
 */
3898 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3900 if (mini_get_debug_options ()->better_cast_details) {
3901 int vtable_reg = alloc_preg (cfg);
3902 int klass_reg = alloc_preg (cfg);
3903 MonoBasicBlock *is_null_bb = NULL;
3905 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3908 NEW_BBLOCK (cfg, is_null_bb);
3910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3911 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3914 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work, warn and bail. */
3916 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3920 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the dynamic source class: obj->vtable->klass. */
3921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3924 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class: rgctx fetch in shared code, constant otherwise. */
3926 context_used = mini_class_check_context_used (cfg, klass);
3928 MonoInst *class_ins;
3930 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3931 to_klass_reg = class_ins->dreg;
3933 to_klass_reg = alloc_preg (cfg);
3934 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3939 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: after a successful cast, clear the
 * saved cast details in the JIT TLS. Zeroing class_cast_from is enough to
 * mark the record invalid.
 */
3944 reset_cast_details (MonoCompile *cfg)
3946 /* Reset the variables holding the cast details */
3947 if (mini_get_debug_options ()->better_cast_details) {
3948 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3950 MONO_ADD_INS (cfg->cbb, tls_get);
3951 /* It is enough to reset the from field */
3952 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is an instance
 * of exactly ARRAY_CLASS (used for covariant array stores). The comparison
 * strategy depends on the compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against a runtime class
 *     constant;
 *   - shared generic code: compare obj->vtable against an rgctx-fetched
 *     vtable;
 *   - AOT: compare against a vtable patch constant;
 *   - otherwise: compare against the vtable pointer as an immediate.
 */
3957 * On return the caller must check @array_class for load errors
3960 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3962 int vtable_reg = alloc_preg (cfg);
3965 context_used = mini_class_check_context_used (cfg, array_class);
/* Record cast source/target for --debug=casts before the check. */
3967 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also serves as the null check on obj. */
3969 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3971 if (cfg->opt & MONO_OPT_SHARED) {
3972 int class_reg = alloc_preg (cfg);
3975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3976 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3977 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3978 } else if (context_used) {
3979 MonoInst *vtable_ins;
3981 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3982 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3984 if (cfg->compile_aot) {
/* NOTE(review): the failure handling after a NULL vtable is not visible in this extraction. */
3988 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3990 vt_reg = alloc_preg (cfg);
3991 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3992 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3995 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3997 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch above falls through to the conditional exception. */
4001 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4003 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit IR to unbox VAL as a Nullable<T> by calling the Nullable "Unbox"
 * helper method of KLASS. In shared generic code (CONTEXT_USED != 0) the
 * helper's address comes from the rgctx and is invoked via calli; otherwise
 * a direct call is emitted, passing the vtable when method sharing requires
 * it.
 */
4007 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4008 * generic code is generated.
4011 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4013 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4016 MonoInst *rgctx, *addr;
4018 /* FIXME: What if the class is shared? We might not
4019 have to get the address of the method from the
4021 addr = emit_get_rgctx_method (cfg, context_used, method,
4022 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
/* llvm-only gsharedvt uses its own indirect-call mechanism. */
4023 if (cfg->llvm_only && cfg->gsharedvt) {
4024 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4026 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4028 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, with the vtable passed if sharing checks demand it. */
4031 gboolean pass_vtable, pass_mrgctx;
4032 MonoInst *rgctx_arg = NULL;
4034 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4035 g_assert (!pass_mrgctx);
4038 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4041 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4044 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes sp [0] to a value of type KLASS: verify the object
 * is a boxed instance of the right element class (InvalidCastException
 * otherwise), then produce the address of the value, which sits immediately
 * after the MonoObject header.
 */
4049 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4053 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4054 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4055 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4056 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4058 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4059 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4060 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4062 /* FIXME: generics */
4063 g_assert (klass->rank == 0);
/* An array object can never be unboxed to a non-array type. */
4066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4067 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-fetched element class. */
4073 MonoInst *element_class;
4075 /* This assertion is from the unboxcast insn */
4076 g_assert (klass->rank == 0);
4078 element_class = emit_get_rgctx_klass (cfg, context_used,
4079 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4081 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4082 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: compile-time class check, with --debug=casts bookkeeping around it. */
4084 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4085 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4086 reset_cast_details (cfg);
/* The unboxed value lives right after the object header. */
4089 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4090 MONO_ADD_INS (cfg->cbb, add);
4091 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete
 * representation is only known at run time. The runtime box type
 * (ref / nullable / vtype) is fetched from the gsharedvt info and branched
 * on:
 *   - vtype: the value address is obj + sizeof (MonoObject);
 *   - ref: the reference is spilled to a temporary and its address is used;
 *   - nullable: the rgctx-provided Nullable unbox helper is called.
 * All paths leave the value address in addr_reg and the result is loaded
 * from it at the merge point.
 */
4098 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4100 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4101 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4105 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check (castclass_unbox icall) before committing to the unbox. */
4111 args [1] = klass_inst;
4114 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4116 NEW_BBLOCK (cfg, is_ref_bb);
4117 NEW_BBLOCK (cfg, is_nullable_bb);
4118 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of KLASS. */
4119 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4126 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4127 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: value sits after the object header. */
4131 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4132 MONO_ADD_INS (cfg->cbb, addr);
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4137 MONO_START_BB (cfg, is_ref_bb);
4139 /* Save the ref to a temporary */
4140 dreg = alloc_ireg (cfg);
4141 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4142 addr->dreg = addr_reg;
4143 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4147 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the Nullable unbox helper through a hand-built
 * one-parameter signature, since the concrete method cannot be constructed
 * at JIT time for gsharedvt. */
4150 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4151 MonoInst *unbox_call;
4152 MonoMethodSignature *unbox_sig;
4154 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4155 unbox_sig->ret = &klass->byval_arg;
4156 unbox_sig->param_count = 1;
4157 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4160 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4162 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4164 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4165 addr->dreg = addr_reg;
4168 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Merge: load the unboxed value from the address computed by whichever path ran. */
4171 MONO_START_BB (cfg, end_bb);
4174 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of type KLASS (FOR_BOX set when the
 * allocation is for a box operation). Several strategies, in order of
 * preference per compilation mode:
 *   - shared generic code: rgctx-fetched klass/vtable + managed allocator or
 *     generic allocation icall;
 *   - MONO_OPT_SHARED: ves_icall_object_new with an explicit domain;
 *   - AOT out-of-line corlib types: specialized mscorlib newobj helper
 *     (avoids relocations);
 *   - otherwise: managed allocator when available, else the allocation
 *     function chosen by mono_class_get_allocation_ftn.
 *
 * Returns NULL and sets the cfg exception on error.
 */
4180 * Returns NULL and set the cfg exception on error.
4183 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4185 MonoInst *iargs [2];
/* Shared generic code path. */
4190 MonoRgctxInfoType rgctx_info;
4191 MonoInst *iargs [2];
/* gsharedvt types have an instance size only known at run time. */
4192 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4194 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4196 if (cfg->opt & MONO_OPT_SHARED)
4197 rgctx_info = MONO_RGCTX_INFO_KLASS;
4199 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4200 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4202 if (cfg->opt & MONO_OPT_SHARED) {
4203 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4205 alloc_ftn = ves_icall_object_new;
4208 alloc_ftn = ves_icall_object_new_specific;
4211 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4212 if (known_instance_size) {
4213 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header. */
4214 if (size < sizeof (MonoObject))
4215 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4217 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4219 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4222 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths follow. */
4225 if (cfg->opt & MONO_OPT_SHARED) {
4226 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4227 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4229 alloc_ftn = ves_icall_object_new;
4230 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4231 /* This happens often in argument checking code, eg. throw new FooException... */
4232 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4233 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4234 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4236 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4237 MonoMethod *managed_alloc = NULL;
/* Vtable lookup failed: report a type-load error on the cfg and bail. */
4241 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4242 cfg->exception_ptr = klass;
4246 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4248 if (managed_alloc) {
4249 int size = mono_class_instance_size (klass);
4250 if (size < sizeof (MonoObject))
4251 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4253 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4254 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4255 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4257 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words ("lw"). */
4259 guint32 lw = vtable->klass->instance_size;
4260 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4261 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4262 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4265 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4269 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL into a new object of type KLASS. Three regimes:
 *   - Nullable<T>: call the Nullable "Box" helper (via rgctx calli in shared
 *     code, direct call otherwise);
 *   - gsharedvt KLASS: branch at run time on the box type (ref / nullable /
 *     vtype) like handle_unbox_gsharedvt, allocating + storing for vtypes,
 *     reloading the reference for refs, and calling the rgctx-provided
 *     Nullable box helper for nullables;
 *   - ordinary valuetype: allocate and store the value after the header.
 *
 * Returns NULL and sets the cfg exception on error.
 */
4273 * Returns NULL and set the cfg exception on error.
4276 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4278 MonoInst *alloc, *ins;
4280 if (mono_class_is_nullable (klass)) {
4281 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4284 if (cfg->llvm_only && cfg->gsharedvt) {
4285 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4286 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4287 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4289 /* FIXME: What if the class is shared? We might not
4290 have to get the method address from the RGCTX. */
4291 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4292 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4293 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4295 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable: direct call, passing the vtable when sharing requires it. */
4298 gboolean pass_vtable, pass_mrgctx;
4299 MonoInst *rgctx_arg = NULL;
4301 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4302 g_assert (!pass_mrgctx);
4305 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4308 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4311 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4315 if (mini_is_gsharedvt_klass (klass)) {
4316 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4317 MonoInst *res, *is_ref, *src_var, *addr;
4320 dreg = alloc_ireg (cfg);
4322 NEW_BBLOCK (cfg, is_ref_bb);
4323 NEW_BBLOCK (cfg, is_nullable_bb);
4324 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of KLASS. */
4325 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4329 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4330 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and copy the value right after the object header. */
4333 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4336 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4337 ins->opcode = OP_STOREV_MEMBASE;
4339 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4340 res->type = STACK_OBJ;
4342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4345 MONO_START_BB (cfg, is_ref_bb);
4347 /* val is a vtype, so has to load the value manually */
4348 src_var = get_vreg_to_inst (cfg, val->dreg);
4350 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4351 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4356 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the Nullable box helper through a hand-built
 * one-parameter signature (the concrete method cannot be constructed at JIT
 * time for gsharedvt). */
4359 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4360 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4362 MonoMethodSignature *box_sig;
4365 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4366 * construct that method at JIT time, so have to do things by hand.
4368 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4369 box_sig->ret = &mono_defaults.object_class->byval_arg;
4370 box_sig->param_count = 1;
4371 box_sig->params [0] = &klass->byval_arg;
4374 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4376 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4377 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4378 res->type = STACK_OBJ;
4382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4384 MONO_START_BB (cfg, end_bb);
/* Ordinary valuetype box: allocate and copy the value. */
4388 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4392 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has a covariant or contravariant generic parameter
 * instantiated with a reference type. Such casts need the full
 * variance-aware cast machinery (the with-cache wrappers) instead of the
 * fast paths.
 */
4398 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4401 MonoGenericContainer *container;
4402 MonoGenericInst *ginst;
/* Instantiated generic: examine the concrete type arguments. */
4404 if (klass->generic_class) {
4405 container = klass->generic_class->container_class->generic_container;
4406 ginst = klass->generic_class->context.class_inst;
/* Open generic in shared code: examine the container's own parameters. */
4407 } else if (klass->generic_container && context_used) {
4408 container = klass->generic_container;
4409 ginst = container->context.class_inst;
/* A variant (in/out) parameter bound to a reference type triggers the slow cast path. */
4414 for (i = 0; i < container->type_argc; ++i) {
4416 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4418 type = ginst->type_argv [i];
4419 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls may be called directly. */
4425 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly instead of through
 * a wrapper. Only icalls which cannot (directly or indirectly) raise a
 * managed exception are safe; a small per-class whitelist plus Math is used.
 */
4428 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4430 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4431 if (!direct_icalls_enabled (cfg))
4435 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4436 * Whitelist a few icalls for now.
4438 if (!direct_icall_type_hash) {
4439 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4441 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4442 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4443 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4444 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the pointer becomes visible to other threads. */
4445 mono_memory_barrier ();
4446 direct_icall_type_hash = h;
4449 if (cmethod->klass == mono_defaults.math_class)
4451 /* No locking needed */
4452 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot use the simple inline
 * vtable comparison: interfaces, arrays, Nullable, MarshalByRef, sealed
 * classes, and open generic parameters all need the slower paths. */
4457 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS, with
 * --debug=casts bookkeeping saved before and reset after the call. ARGS are
 * the wrapper's arguments (obj, klass, cache).
 */
4460 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4462 MonoMethod *mono_castclass;
4465 mono_castclass = mono_marshal_get_castclass_with_cache ();
4467 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4468 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4469 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh index uniquely identifying one castclass call site:
 * method index in the high 16 bits, a per-cfg counter in the low 16.
 */
4475 get_castclass_cache_idx (MonoCompile *cfg)
4477 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4478 cfg->castclass_cache_index ++;
4479 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code helper: cast OBJ to KLASS via the castclass-with-cache
 * wrapper, with the klass as a constant and a per-call-site runtime cache
 * slot identified by a fresh cache index.
 */
4483 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4492 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4495 idx = get_castclass_cache_idx (cfg);
4496 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4498 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4499 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: cast SRC to KLASS, throwing
 * InvalidCastException on mismatch. Strategy depends on KLASS and sharing:
 *   - variant generic argument (non-shared): castclass-with-cache wrapper;
 *   - MarshalByRef / interface (non-shared): inlined castclass marshal
 *     wrapper;
 *   - complex casts in shared code: with-cache wrapper fed from the rgctx
 *     cast cache;
 *   - otherwise: inline null check + vtable/klass comparison, with sealed
 *     classes eligible for a single vtable pointer compare.
 *
 * Returns NULL and sets the cfg exception on error.
 */
4503 * Returns NULL and set the cfg exception on error.
4506 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4508 MonoBasicBlock *is_null_bb;
4509 int obj_reg = src->dreg;
4510 int vtable_reg = alloc_preg (cfg);
4512 MonoInst *klass_inst = NULL, *res;
4514 context_used = mini_class_check_context_used (cfg, klass);
4516 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4517 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4518 (*inline_costs) += 2;
4520 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4521 MonoMethod *mono_castclass;
4522 MonoInst *iargs [1];
4525 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the wrapper body into the current method. */
4528 save_cast_details (cfg, klass, src->dreg, TRUE);
4529 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4530 iargs, ip, cfg->real_offset, TRUE);
4531 reset_cast_details (cfg);
4532 CHECK_CFG_EXCEPTION;
4533 g_assert (costs > 0);
4535 cfg->real_offset += 5;
4537 (*inline_costs) += costs;
/* Shared code from here on. */
4545 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4546 MonoInst *cache_ins;
4548 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4553 /* klass - it's the second element of the cache entry*/
4554 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4557 args [2] = cache_ins;
4559 return emit_castclass_with_cache (cfg, klass, args);
4562 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass the cast. */
4565 NEW_BBLOCK (cfg, is_null_bb);
4567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4570 save_cast_details (cfg, klass, obj_reg, FALSE);
4572 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4574 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4576 int klass_reg = alloc_preg (cfg);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: exact type match, so a single pointer compare suffices. */
4580 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4581 /* the remoting code is broken, access the class for now */
4582 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4583 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4585 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4586 cfg->exception_ptr = klass;
4589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4594 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full class-hierarchy cast check. */
4596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4597 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4601 MONO_START_BB (cfg, is_null_bb);
4603 reset_cast_details (cfg);
4612 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit IR implementing the CIL 'isinst' test of SRC against KLASS.
 * The returned instruction's register holds the original object
 * reference on success and NULL (0) on failure.  CONTEXT_USED is
 * non-zero when KLASS must be fetched through the RGCTX (generic
 * sharing); in that case klass_inst below carries the runtime class.
 */
4615 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4618 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4619 int obj_reg = src->dreg;
4620 int vtable_reg = alloc_preg (cfg);
4621 int res_reg = alloc_ireg_ref (cfg);
4622 MonoInst *klass_inst = NULL;
/*
 * Complex cases (reference-variant generic arguments, or classes for
 * which is_complex_isinst () is true) are handled by calling the
 * managed isinst-with-cache wrapper instead of emitting inline checks.
 */
4627 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4628 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4629 MonoInst *cache_ins;
4631 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4636 /* klass - it's the second element of the cache entry */
4637 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4640 args [2] = cache_ins;
4642 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4645 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/*
 * Inline fast path: a null check followed by type checks which branch
 * to false_bb (not an instance) or is_null_bb (success / null input).
 */
4648 NEW_BBLOCK (cfg, is_null_bb);
4649 NEW_BBLOCK (cfg, false_bb);
4650 NEW_BBLOCK (cfg, end_bb);
4652 /* Do the assignment at the beginning, so the other assignment can be if converted */
4653 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4654 ins->type = STACK_OBJ;
4657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4658 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4662 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4663 g_assert (!context_used);
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4667 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks first, then check the element class. */
4670 int rank_reg = alloc_preg (cfg);
4671 int eclass_reg = alloc_preg (cfg);
4673 g_assert (!context_used);
4674 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/*
 * Special-case the element classes 'object', 'ValueType' (the enum
 * base's parent) and 'Enum', which have non-trivial array covariance
 * rules; otherwise fall through to the generic element-class checks.
 */
4679 if (klass->cast_class == mono_defaults.object_class) {
4680 int parent_reg = alloc_preg (cfg);
4681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4682 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4683 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4685 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4686 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4689 } else if (klass->cast_class == mono_defaults.enum_class) {
4690 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4692 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4693 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4695 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4696 /* Check that the object is a vector too */
4697 int bounds_reg = alloc_preg (cfg);
4698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4703 /* the is_null_bb target simply copies the input register to the output */
4704 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4706 } else if (mono_class_is_nullable (klass)) {
4707 g_assert (!context_used);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4709 /* the is_null_bb target simply copies the input register to the output */
4710 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/*
 * Sealed, non-shared, non-AOT classes allow an exact vtable/class
 * pointer comparison instead of walking the class hierarchy.
 */
4712 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4713 g_assert (!context_used);
4714 /* the remoting code is broken, access the class for now */
4715 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4716 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4718 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4719 cfg->exception_ptr = klass;
4722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4731 /* the is_null_bb target simply copies the input register to the output */
4732 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register is set to NULL. */
4737 MONO_START_BB (cfg, false_bb);
4739 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4742 MONO_START_BB (cfg, is_null_bb);
4744 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the internal 'cisinst' opcode, a remoting-aware variant
 * of isinst that yields a small integer code (see comment below)
 * instead of an object reference.  The proxy branches only exist when
 * remoting support is compiled in (DISABLE_REMOTING not defined).
 */
4750 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4752 /* This opcode takes as input an object reference and a class, and returns:
4753 0) if the object is an instance of the class,
4754 1) if the object is not instance of the class,
4755 2) if the object is a proxy whose type cannot be determined */
4758 #ifndef DISABLE_REMOTING
4759 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4761 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4763 int obj_reg = src->dreg;
4764 int dreg = alloc_ireg (cfg);
4766 #ifndef DISABLE_REMOTING
4767 int klass_reg = alloc_preg (cfg);
4770 NEW_BBLOCK (cfg, true_bb);
4771 NEW_BBLOCK (cfg, false_bb);
4772 NEW_BBLOCK (cfg, end_bb);
4773 #ifndef DISABLE_REMOTING
4774 NEW_BBLOCK (cfg, false2_bb);
4775 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is never an instance: branch straight to false_bb. */
4778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4781 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4782 #ifndef DISABLE_REMOTING
4783 NEW_BBLOCK (cfg, interface_fail_bb);
4786 tmp_reg = alloc_preg (cfg);
4787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4788 #ifndef DISABLE_REMOTING
/*
 * Interface check first; when it fails, see whether the object is a
 * transparent proxy whose custom type info may still satisfy the cast.
 */
4789 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4790 MONO_START_BB (cfg, interface_fail_bb);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4793 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4795 tmp_reg = alloc_preg (cfg);
4796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4800 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4803 #ifndef DISABLE_REMOTING
/*
 * Non-interface case with remoting: if the object is a transparent
 * proxy, test against its remote proxy_class; a proxy with custom
 * type info yields result 2 (type cannot be determined) via false2_bb.
 */
4804 tmp_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4808 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4809 tmp_reg = alloc_preg (cfg);
4810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4813 tmp_reg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4818 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4821 MONO_START_BB (cfg, no_proxy_bb);
4823 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4825 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undetermined proxy, 0 = instance. */
4829 MONO_START_BB (cfg, false_bb);
4831 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4834 #ifndef DISABLE_REMOTING
4835 MONO_START_BB (cfg, false2_bb);
4837 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4841 MONO_START_BB (cfg, true_bb);
4843 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4845 MONO_START_BB (cfg, end_bb);
/* The returned instruction carries the integer result on the stack. */
4848 MONO_INST_NEW (cfg, ins, OP_ICONST);
4850 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the internal 'ccastclass' opcode, a remoting-aware
 * variant of castclass returning an integer code (see comment below).
 * Throws InvalidCastException on definite failure.
 */
4856 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4858 /* This opcode takes as input an object reference and a class, and returns:
4859 0) if the object is an instance of the class,
4860 1) if the object is a proxy whose type cannot be determined
4861 an InvalidCastException exception is thrown otherwise*/
4864 #ifndef DISABLE_REMOTING
4865 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4867 MonoBasicBlock *ok_result_bb;
4869 int obj_reg = src->dreg;
4870 int dreg = alloc_ireg (cfg);
4871 int tmp_reg = alloc_preg (cfg);
4873 #ifndef DISABLE_REMOTING
4874 int klass_reg = alloc_preg (cfg);
4875 NEW_BBLOCK (cfg, end_bb);
4878 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details for a descriptive InvalidCastException message. */
4883 save_cast_details (cfg, klass, obj_reg, FALSE);
4885 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4886 #ifndef DISABLE_REMOTING
4887 NEW_BBLOCK (cfg, interface_fail_bb);
4889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Interface fast path; on failure, only a transparent proxy with
 * custom type info escapes the exception (yielding result 1).
 */
4890 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4891 MONO_START_BB (cfg, interface_fail_bb);
4892 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4894 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4896 tmp_reg = alloc_preg (cfg);
4897 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4899 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4901 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4905 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4909 #ifndef DISABLE_REMOTING
/*
 * Non-interface case with remoting: transparent proxies are checked
 * against their remote proxy_class; anything else takes the plain
 * castclass path in no_proxy_bb.
 */
4910 NEW_BBLOCK (cfg, no_proxy_bb);
4912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4914 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4916 tmp_reg = alloc_preg (cfg);
4917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4920 tmp_reg = alloc_preg (cfg);
4921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4925 NEW_BBLOCK (cfg, fail_1_bb);
4927 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4929 MONO_START_BB (cfg, fail_1_bb);
4931 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4934 MONO_START_BB (cfg, no_proxy_bb);
4936 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4938 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4942 MONO_START_BB (cfg, ok_result_bb);
4944 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4946 #ifndef DISABLE_REMOTING
4947 MONO_START_BB (cfg, end_bb);
/* The returned instruction carries the integer result on the stack. */
4951 MONO_INST_NEW (cfg, ins, OP_ICONST);
4953 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 * Intrinsic expansion of Enum.HasFlag: loads the enum value pointed to
 * by ENUM_THIS, computes (value & flag) == flag and leaves the boolean
 * result on the stack as an I4.  Uses 32-bit or 64-bit register ops
 * depending on the underlying enum type (is_i4, selected by the switch
 * over enum_type->type).
 */
4958 static G_GNUC_UNUSED MonoInst*
4959 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4961 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4962 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4965 switch (enum_type->type) {
4968 #if SIZEOF_REGISTER == 8
4980 MonoInst *load, *and_, *cmp, *ceq;
4981 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4982 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int dest_reg = alloc_ireg (cfg);
/* value = *enum_this; and = value & flag; result = (and == flag). */
4985 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4986 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4987 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4988 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4990 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that need simpler forms. */
4993 load = mono_decompose_opcode (cfg, load);
4994 and_ = mono_decompose_opcode (cfg, and_);
4995 cmp = mono_decompose_opcode (cfg, cmp);
4996 ceq = mono_decompose_opcode (cfg, ceq);
5004 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Allocate a delegate of class KLASS bound to METHOD with receiver
 * TARGET, inlining the work normally done by mono_delegate_ctor ().
 * VIRTUAL_ selects the virtual-dispatch delegate flavor; CONTEXT_USED
 * is non-zero under generic sharing, in which case METHOD is fetched
 * through the RGCTX.  LLVM-only mode initializes the delegate through
 * JIT icalls instead of trampolines.
 */
5006 static G_GNUC_UNUSED MonoInst*
5007 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5011 gpointer trampoline;
5012 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out early when no virtual invoke impl exists for this signature. */
5016 if (virtual_ && !cfg->llvm_only) {
5017 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5020 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5024 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5028 /* Inline the contents of mono_delegate_ctor */
5030 /* Set target field */
5031 /* Optimize away setting of NULL target */
5032 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5033 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* GC write barrier needed for the reference store into the new object. */
5034 if (cfg->gen_write_barriers) {
5035 dreg = alloc_preg (cfg);
5036 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5037 emit_write_barrier (cfg, ptr, target);
5041 /* Set method field */
5042 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5046 * To avoid looking up the compiled code belonging to the target method
5047 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5048 * store it, and we fill it after the method has been compiled.
5050 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5051 MonoInst *code_slot_ins;
5054 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
5056 domain = mono_domain_get ();
5057 mono_domain_lock (domain);
5058 if (!domain_jit_info (domain)->method_code_hash)
5059 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5060 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5062 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5063 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5065 mono_domain_unlock (domain);
5067 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* LLVM-only: delegate initialization goes through runtime icalls. */
5072 if (cfg->llvm_only) {
5073 MonoInst *args [16];
5078 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5079 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5082 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: refer to the trampoline via a patch; JIT: create it now. */
5088 if (cfg->compile_aot) {
5089 MonoDelegateClassMethodPair *del_tramp;
5091 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5092 del_tramp->klass = klass;
5093 del_tramp->method = context_used ? NULL : method;
5094 del_tramp->is_virtual = virtual_;
5095 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5098 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5100 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5101 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5104 /* Set invoke_impl field */
5106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5108 dreg = alloc_preg (cfg);
5109 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5112 dreg = alloc_preg (cfg);
5113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this delegate uses virtual dispatch (method_is_virtual flag). */
5117 dreg = alloc_preg (cfg);
5118 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5119 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5121 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a native call to the rank-specific mono_array_new_va icall for
 * a 'newobj' on a multi-dimensional array constructor.  SP holds the
 * dimension arguments.  The call uses a vararg calling convention,
 * which LLVM cannot compile, so LLVM is disabled for this method.
 */
5127 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5129 MonoJitICallInfo *info;
5131 /* Need to register the icall so it gets an icall wrapper */
5132 info = mono_get_array_new_va_icall (rank);
5134 cfg->flags |= MONO_CFG_HAS_VARARGS;
5136 /* mono_array_new_va () needs a vararg calling convention */
5137 cfg->exception_message = g_strdup ("array-new");
5138 cfg->disable_llvm = TRUE;
5140 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5141 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5145 * handle_constrained_gsharedvt_call:
5147 * Handle constrained calls where the receiver is a gsharedvt type.
5148 * Return the instruction representing the call. Set the cfg exception on failure.
5151 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5152 gboolean *ref_emit_widen)
5154 MonoInst *ins = NULL;
5155 gboolean emit_widen = *ref_emit_widen;
5158 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5159 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5160 * pack the arguments into an array, and do the rest of the work in an icall.
/*
 * The guard below restricts this path to simple signatures: at most one
 * parameter (reference, byref or gsharedvt), and void/primitive/
 * reference/struct/gsharedvt return types.
 */
5162 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5163 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5164 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5165 MonoInst *args [16];
5168 * This case handles calls to
5169 * - object:ToString()/Equals()/GetHashCode(),
5170 * - System.IComparable<T>:CompareTo()
5171 * - System.IEquatable<T>:Equals ()
5172 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (via RGCTX when generic context is used). */
5176 if (mono_method_check_context_used (cmethod))
5177 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5179 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5180 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5182 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5183 if (fsig->hasthis && fsig->param_count) {
5184 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5185 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5186 ins->dreg = alloc_preg (cfg);
5187 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5188 MONO_ADD_INS (cfg->cbb, ins);
5191 if (mini_is_gsharedvt_type (fsig->params [0])) {
5192 int addr_reg, deref_arg_reg;
/* args [3] tells the icall whether the argument must be dereferenced. */
5194 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5195 deref_arg_reg = alloc_preg (cfg);
5196 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5197 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5199 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5200 addr_reg = ins->dreg;
5201 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5203 EMIT_NEW_ICONST (cfg, args [3], 0);
5204 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5207 EMIT_NEW_ICONST (cfg, args [3], 0);
5208 EMIT_NEW_ICONST (cfg, args [4], 0);
5210 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/*
 * The icall returns a boxed result; unbox gsharedvt returns, and for
 * primitives/structs read the value out of the box (skip MonoObject
 * header) so the caller sees an unboxed value.
 */
5213 if (mini_is_gsharedvt_type (fsig->ret)) {
5214 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5215 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5219 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5220 MONO_ADD_INS (cfg->cbb, add);
5222 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5223 MONO_ADD_INS (cfg->cbb, ins);
5224 /* ins represents the call result */
5227 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5230 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 * Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block.
 * No-op when there is no got_var or it was already allocated.
 */
5239 mono_emit_load_got_addr (MonoCompile *cfg)
5241 MonoInst *getaddr, *dummy_use;
5243 if (!cfg->got_var || cfg->got_var_allocated)
5246 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5247 getaddr->cil_code = cfg->header->code;
5248 getaddr->dreg = cfg->got_var->dreg;
5250 /* Add it to the start of the first bblock */
5251 if (cfg->bb_entry->code) {
5252 getaddr->next = cfg->bb_entry->code;
5253 cfg->bb_entry->code = getaddr;
5256 MONO_ADD_INS (cfg->bb_entry, getaddr);
5258 cfg->got_var_allocated = TRUE;
5261 * Add a dummy use to keep the got_var alive, since real uses might
5262 * only be generated by the back ends.
5263 * Add it to end_bblock, so the variable's lifetime covers the whole
5265 * It would be better to make the usage of the got var explicit in all
5266 * cases when the backend needs it (i.e. calls, throw etc.), so this
5267 * wouldn't be needed.
5269 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5270 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit; lazily read from MONO_INLINELIMIT, else INLINE_LENGTH_LIMIT. */
5273 static int inline_limit;
5274 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects methods marked noinlining/synchronized, marshal-by-ref
 * classes, over-limit bodies (unless AggressiveInlining), methods
 * whose class cctor cannot be safely run/ordered, soft-float R4
 * signatures, and anything on cfg->dont_inline.
 */
5277 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5279 MonoMethodHeaderSummary header;
5281 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5282 MonoMethodSignature *sig = mono_method_signature (method);
5286 if (cfg->disable_inline)
/* Cap recursion depth of nested inlining. */
5291 if (cfg->inline_depth > 10)
5294 if (!mono_method_get_header_summary (method, &header))
5297 /*runtime, icall and pinvoke are checked by summary call*/
5298 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5299 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5300 (mono_class_is_marshalbyref (method->klass)) ||
5304 /* also consider num_locals? */
5305 /* Do the size check early to avoid creating vtables */
5306 if (!inline_limit_inited) {
5307 if (g_getenv ("MONO_INLINELIMIT"))
5308 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
5310 inline_limit = INLINE_LENGTH_LIMIT;
5311 inline_limit_inited = TRUE;
5313 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5317 * if we can initialize the class of the method right away, we do,
5318 * otherwise we don't allow inlining if the class needs initialization,
5319 * since it would mean inserting a call to mono_runtime_class_init()
5320 * inside the inlined code
5322 if (!(cfg->opt & MONO_OPT_SHARED)) {
5323 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5324 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5325 vtable = mono_class_vtable (cfg->domain, method->klass);
5328 if (!cfg->compile_aot)
5329 mono_runtime_class_init (vtable);
5330 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5331 if (cfg->run_cctors && method->klass->has_cctor) {
5332 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5333 if (!method->klass->runtime_info)
5334 /* No vtable created yet */
5336 vtable = mono_class_vtable (cfg->domain, method->klass);
5339 /* This makes so that inline cannot trigger */
5340 /* .cctors: too many apps depend on them */
5341 /* running with a specific order... */
5342 if (! vtable->initialized)
5344 mono_runtime_class_init (vtable);
5346 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5347 if (!method->klass->runtime_info)
5348 /* No vtable created yet */
5350 vtable = mono_class_vtable (cfg->domain, method->klass);
5353 if (!vtable->initialized)
5358 * If we're compiling for shared code
5359 * the cctor will need to be run at aot method load time, for example,
5360 * or at the end of the compilation of the inlining method.
5362 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets: R4 in the signature cannot be inlined. */
5366 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5367 if (mono_arch_is_soft_float ()) {
5369 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5371 for (i = 0; i < sig->param_count; ++i)
5372 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5377 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 * Return whether a static field access in METHOD requires emitting a
 * class-initialization call for KLASS.  Already-initialized vtables
 * (JIT only), beforefieldinit semantics, and static accesses from
 * within the class itself avoid the cctor call.
 */
5384 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5386 if (!cfg->compile_aot) {
5388 if (vtable->initialized)
5392 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5393 if (cfg->method == method)
5397 if (!mono_class_needs_cctor_run (klass, method))
5400 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5401 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit the address of element INDEX in one-dimensional array ARR of
 * element class KLASS: arr + index * elem_size + offsetof (vector).
 * BCHECK controls whether a bounds check is emitted.  Gsharedvt
 * variable-size classes read the element size from the RGCTX.
 */
5408 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5412 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5415 if (mini_is_gsharedvt_variable_klass (klass)) {
5418 mono_class_init (klass);
5419 size = mono_class_array_element_size (klass);
5422 mult_reg = alloc_preg (cfg);
5423 array_reg = arr->dreg;
5424 index_reg = index->dreg;
5426 #if SIZEOF_REGISTER == 8
5427 /* The array reg is 64 bits but the index reg is only 32 */
5428 if (COMPILE_LLVM (cfg)) {
5430 index2_reg = index_reg;
5432 index2_reg = alloc_preg (cfg);
5433 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5436 if (index->type == STACK_I8) {
5437 index2_reg = alloc_preg (cfg);
5438 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5440 index2_reg = index_reg;
5445 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold power-of-two element sizes into a LEA. */
5447 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5448 if (size == 1 || size == 2 || size == 4 || size == 8) {
5449 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5451 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5452 ins->klass = mono_class_get_element_class (klass);
5453 ins->type = STACK_MP;
5459 add_reg = alloc_ireg_mp (cfg);
5462 MonoInst *rgctx_ins;
/* Gsharedvt path: element size comes from the runtime generic context. */
5465 g_assert (cfg->gshared);
5466 context_used = mini_class_check_context_used (cfg, klass);
5467 g_assert (context_used);
5468 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5469 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5473 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5474 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5475 ins->klass = mono_class_get_element_class (klass);
5476 ins->type = STACK_MP;
5477 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 * Emit the address of element [i,j] in a two-dimensional array: both
 * indices are adjusted by the per-dimension lower bound, bounds-checked
 * against the per-dimension length, then combined as
 * (realidx1 * len2 + realidx2) * elem_size + offsetof (vector).
 */
5483 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5485 int bounds_reg = alloc_preg (cfg);
5486 int add_reg = alloc_ireg_mp (cfg);
5487 int mult_reg = alloc_preg (cfg);
5488 int mult2_reg = alloc_preg (cfg);
5489 int low1_reg = alloc_preg (cfg);
5490 int low2_reg = alloc_preg (cfg);
5491 int high1_reg = alloc_preg (cfg);
5492 int high2_reg = alloc_preg (cfg);
5493 int realidx1_reg = alloc_preg (cfg);
5494 int realidx2_reg = alloc_preg (cfg);
5495 int sum_reg = alloc_preg (cfg);
5496 int index1, index2, tmpreg;
5500 mono_class_init (klass);
5501 size = mono_class_array_element_size (klass);
5503 index1 = index_ins1->dreg;
5504 index2 = index_ins2->dreg;
5506 #if SIZEOF_REGISTER == 8
5507 /* The array reg is 64 bits but the index reg is only 32 */
5508 if (COMPILE_LLVM (cfg)) {
5511 tmpreg = alloc_preg (cfg);
5512 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5514 tmpreg = alloc_preg (cfg);
5515 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5519 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5523 /* range checking */
5524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5525 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; require realidx1 < length. */
5527 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5528 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5529 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5531 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5532 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5533 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
5535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5536 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5537 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5539 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5540 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5541 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + vector offset */
5543 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5544 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5546 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5547 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5549 ins->type = STACK_MP;
5551 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for the array accessor
 * method CMETHOD, with the array object and index values on SP.  Fast paths
 * exist for rank-1 and rank-2 arrays; otherwise the generic managed
 * Address() wrapper is called.
 * NOTE(review): excerpt with elided lines; the branch conditions around the
 * rank-1 fast path and the gsharedvt bailout are partly missing here.
 */
5557 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5561 MonoMethod *addr_method;
5563 MonoClass *eclass = cmethod->klass->element_class;
/* For a Set accessor the trailing parameter is the value to store, not an index */
5565 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank-1 fast path: inline, bounds-checked address computation */
5568 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5570 /* emit_ldelema_2 depends on OP_LMUL */
5571 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5572 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt element types have no statically-known size, so no inline address math */
5575 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic case: call the marshalled Address() helper for this rank/element size */
5578 element_size = mono_class_array_element_size (eclass);
5579 addr_method = mono_marshal_get_array_address (rank, element_size);
5580 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default breakpoint policy: honor every break IL instruction /
 * Debugger.Break () call, regardless of METHOD.
 */
5585 static MonoBreakPolicy
5586 always_insert_breakpoint (MonoMethod *method)
5588 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed breakpoint policy hook; replaced via mono_set_break_policy () */
5591 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5594 * mono_set_break_policy:
5595 * policy_callback: the new callback function
5597 * Allow embedders to decide whether to actually obey breakpoint instructions
5598 * (both break IL instructions and Debugger.Break () method calls), for example
5599 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5600 * untrusted or semi-trusted code.
5602 * @policy_callback will be called every time a break point instruction needs to
5603 * be inserted with the method argument being the method that calls Debugger.Break()
5604 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5605 * if it wants the breakpoint to not be effective in the given method.
5606 * #MONO_BREAK_POLICY_ALWAYS is the default.
5609 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Install the embedder-provided policy; a NULL callback restores the default */
5611 if (policy_callback)
5612 break_policy_func = policy_callback;
/* NOTE(review): excerpt gap — the `else` separating these assignments is on an elided line */
5614 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.  (Misspelled name is historical; kept
 * because callers reference it.)
 * NOTE(review): excerpt gaps — the per-case return statements are elided.
 */
5618 should_insert_brekpoint (MonoMethod *method) {
5619 switch (break_policy_func (method)) {
5620 case MONO_BREAK_POLICY_ALWAYS:
5622 case MONO_BREAK_POLICY_NEVER:
5624 case MONO_BREAK_POLICY_ON_DBG:
5625 g_warning ("mdb no longer supported");
/* default: the callback returned a value outside MonoBreakPolicy */
5628 g_warning ("Incorrect value returned from break policy callback");
5633 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the body of Array.{Get,Set}GenericValueImpl: args [0] is the
 * array, args [1] the index, args [2] a pointer to the value location.
 * IS_SET selects store-into-array vs. load-from-array.
 */
5635 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5637 MonoInst *addr, *store, *load;
5638 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5640 /* the bounds check is already done by the callers */
5641 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set path: copy *args [2] into the element slot, with a write barrier for references */
5643 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5644 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5645 if (mini_type_is_reference (fsig->params [2]))
5646 emit_write_barrier (cfg, addr, load);
/* get path: copy the element slot into *args [2] (no barrier needed: dest is a local) */
5648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5649 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS behaves as a reference type (incl. gshared type variables constrained to refs) */
5656 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5658 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: store sp [2] into array sp [0] at index sp [1],
 * element type KLASS.  With SAFETY_CHECKS, reference-element stores go
 * through the virtual stelemref helper, which performs the array covariance
 * (ArrayTypeMismatchException) check; storing a null constant skips it.
 */
5662 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5664 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5665 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5666 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5667 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5668 MonoInst *iargs [3];
5671 mono_class_setup_vtable (obj_array);
5672 g_assert (helper->slot);
5674 if (sp [0]->type != STACK_OBJ)
5676 if (sp [2]->type != STACK_OBJ)
5683 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element: size unknown at compile time, store through computed address */
5687 if (mini_is_gsharedvt_variable_klass (klass)) {
5690 // FIXME-VT: OP_ICONST optimization
5691 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5692 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5693 ins->opcode = OP_STOREV_MEMBASE;
/* constant index: fold the element offset into the store instruction */
5694 } else if (sp [1]->opcode == OP_ICONST) {
5695 int array_reg = sp [0]->dreg;
5696 int index_reg = sp [1]->dreg;
5697 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64-bit: the index vreg is 32-bit, widen before addressing */
5699 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5700 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5703 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5704 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* generic case: compute the element address, store, barrier for reference elements */
5706 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5707 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5708 if (generic_class_is_reference_type (cfg, klass))
5709 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: an element access with no bounds
 * or covariance checks.  IS_SET selects the element type from the value
 * parameter vs. the return type, then emits an unchecked store or load.
 */
5716 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5721 eklass = mono_class_from_mono_type (fsig->params [2]);
5723 eklass = mono_class_from_mono_type (fsig->ret);
/* store path: safety_checks == FALSE skips bounds/stelemref checks */
5726 return emit_array_store (cfg, eklass, args, FALSE);
/* load path: unchecked address computation followed by a typed load */
5728 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5735 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5738 int param_size, return_size;
5740 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5741 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5743 if (cfg->verbose_level > 3)
5744 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5746 //Don't allow mixing reference types with value types
5747 if (param_klass->valuetype != return_klass->valuetype) {
5748 if (cfg->verbose_level > 3)
5749 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5753 if (!param_klass->valuetype) {
5754 if (cfg->verbose_level > 3)
5755 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5760 if (param_klass->has_references || return_klass->has_references)
5763 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5764 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5765 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5766 if (cfg->verbose_level > 3)
5767 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5771 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5772 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5773 if (cfg->verbose_level > 3)
5774 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5778 param_size = mono_class_value_size (param_klass, &align);
5779 return_size = mono_class_value_size (return_klass, &align);
5781 //We can do it if sizes match
5782 if (param_size == return_size) {
5783 if (cfg->verbose_level > 3)
5784 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5788 //No simple way to handle struct if sizes don't match
5789 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5790 if (cfg->verbose_level > 3)
5791 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5796 * Same reg size category.
5797 * A quick note on why we don't require widening here.
5798 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5800 * Since the source value comes from a function argument, the JIT will already have
5801 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5803 if (param_size <= 4 && return_size <= 4) {
5804 if (cfg->verbose_level > 3)
5805 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov<S,R>: reinterpret args [0] as the return type
 * when the two types are move-compatible (see is_unsafe_mov_compatible).
 * NOTE(review): excerpt gaps — the emitted move / bailout returns between
 * these checks are elided.
 */
5813 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5815 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5816 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* gsharedvt return type: size/representation unknown, cannot inline */
5818 if (mini_is_gsharedvt_variable_type (fsig->ret))
5821 //Valuetypes that are semantically equivalent or numbers than can be widened to
5822 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5825 //Arrays of valuetypes that are semantically equivalent
5826 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call CMETHOD with an intrinsic IR sequence;
 * first the SIMD intrinsics (when enabled), then the native-types ones.
 * NOTE(review): excerpt gaps — the return of the SIMD result is elided.
 */
5833 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5835 #ifdef MONO_ARCH_SIMD_INTRINSICS
5836 MonoInst *ins = NULL;
5838 if (cfg->opt & MONO_OPT_SIMD) {
5839 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5845 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5849 emit_memory_barrier (MonoCompile *cfg, int kind)
5851 MonoInst *ins = NULL;
5852 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5853 MONO_ADD_INS (cfg->cbb, ins);
5854 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics that only the LLVM backend can lower: Math.Sin/Cos/Sqrt/Abs
 * mapped to unary FP opcodes, and Math.Min/Max mapped to integer
 * conditional-move opcodes when MONO_OPT_CMOV is on.
 * NOTE(review): excerpt gaps — the opcode assignments (OP_SIN, OP_IMIN, ...)
 * on the branch bodies are elided.
 */
5860 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5862 MonoInst *ins = NULL;
5865 /* The LLVM backend supports these intrinsics */
5866 if (cmethod->klass == mono_defaults.math_class) {
5867 if (strcmp (cmethod->name, "Sin") == 0) {
5869 } else if (strcmp (cmethod->name, "Cos") == 0) {
5871 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5873 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: one R8 argument, R8 result in a fresh freg */
5877 if (opcode && fsig->param_count == 1) {
5878 MONO_INST_NEW (cfg, ins, opcode);
5879 ins->type = STACK_R8;
5880 ins->dreg = mono_alloc_freg (cfg);
5881 ins->sreg1 = args [0]->dreg;
5882 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to cmov-style opcodes, selected by operand signedness/width */
5886 if (cfg->opt & MONO_OPT_CMOV) {
5887 if (strcmp (cmethod->name, "Min") == 0) {
5888 if (fsig->params [0]->type == MONO_TYPE_I4)
5890 if (fsig->params [0]->type == MONO_TYPE_U4)
5891 opcode = OP_IMIN_UN;
5892 else if (fsig->params [0]->type == MONO_TYPE_I8)
5894 else if (fsig->params [0]->type == MONO_TYPE_U8)
5895 opcode = OP_LMIN_UN;
5896 } else if (strcmp (cmethod->name, "Max") == 0) {
5897 if (fsig->params [0]->type == MONO_TYPE_I4)
5899 if (fsig->params [0]->type == MONO_TYPE_U4)
5900 opcode = OP_IMAX_UN;
5901 else if (fsig->params [0]->type == MONO_TYPE_I8)
5903 else if (fsig->params [0]->type == MONO_TYPE_U8)
5904 opcode = OP_LMAX_UN;
/* binary intrinsic: stack type follows the operand width (I4 vs I8) */
5908 if (opcode && fsig->param_count == 2) {
5909 MONO_INST_NEW (cfg, ins, opcode);
5910 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5911 ins->dreg = mono_alloc_ireg (cfg);
5912 ins->sreg1 = args [0]->dreg;
5913 ins->sreg2 = args [1]->dreg;
5914 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are valid in gshared code as well: the Array
 * UnsafeStore/UnsafeLoad/UnsafeMov helpers, dispatched on the method name.
 */
5922 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5924 if (cmethod->klass == mono_defaults.array_class) {
5925 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5926 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5927 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5928 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5929 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5930 return emit_array_unsafe_mov (cfg, fsig, args);
5937 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5939 MonoInst *ins = NULL;
5941 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5943 if (cmethod->klass == mono_defaults.string_class) {
5944 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5945 int dreg = alloc_ireg (cfg);
5946 int index_reg = alloc_preg (cfg);
5947 int add_reg = alloc_preg (cfg);
5949 #if SIZEOF_REGISTER == 8
5950 if (COMPILE_LLVM (cfg)) {
5951 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5953 /* The array reg is 64 bits but the index reg is only 32 */
5954 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5957 index_reg = args [1]->dreg;
5959 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5961 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5962 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5963 add_reg = ins->dreg;
5964 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5967 int mult_reg = alloc_preg (cfg);
5968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5969 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5970 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5971 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5973 type_from_op (cfg, ins, NULL, NULL);
5975 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5976 int dreg = alloc_ireg (cfg);
5977 /* Decompose later to allow more optimizations */
5978 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5979 ins->type = STACK_I4;
5980 ins->flags |= MONO_INST_FAULT;
5981 cfg->cbb->has_array_access = TRUE;
5982 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5987 } else if (cmethod->klass == mono_defaults.object_class) {
5988 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5989 int dreg = alloc_ireg_ref (cfg);
5990 int vt_reg = alloc_preg (cfg);
5991 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5992 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5993 type_from_op (cfg, ins, NULL, NULL);
5996 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5997 int dreg = alloc_ireg (cfg);
5998 int t1 = alloc_ireg (cfg);
6000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6001 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6002 ins->type = STACK_I4;
6005 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6006 MONO_INST_NEW (cfg, ins, OP_NOP);
6007 MONO_ADD_INS (cfg->cbb, ins);
6011 } else if (cmethod->klass == mono_defaults.array_class) {
6012 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6013 return emit_array_generic_access (cfg, fsig, args, FALSE);
6014 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6015 return emit_array_generic_access (cfg, fsig, args, TRUE);
6017 #ifndef MONO_BIG_ARRAYS
6019 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6022 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6023 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6024 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6025 int dreg = alloc_ireg (cfg);
6026 int bounds_reg = alloc_ireg_mp (cfg);
6027 MonoBasicBlock *end_bb, *szarray_bb;
6028 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6030 NEW_BBLOCK (cfg, end_bb);
6031 NEW_BBLOCK (cfg, szarray_bb);
6033 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6034 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6037 /* Non-szarray case */
6039 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6040 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6042 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6043 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6044 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6045 MONO_START_BB (cfg, szarray_bb);
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6051 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6052 MONO_START_BB (cfg, end_bb);
6054 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6055 ins->type = STACK_I4;
6061 if (cmethod->name [0] != 'g')
6064 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6065 int dreg = alloc_ireg (cfg);
6066 int vtable_reg = alloc_preg (cfg);
6067 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6068 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6069 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6070 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6071 type_from_op (cfg, ins, NULL, NULL);
6074 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6075 int dreg = alloc_ireg (cfg);
6077 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6078 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6079 type_from_op (cfg, ins, NULL, NULL);
6084 } else if (cmethod->klass == runtime_helpers_class) {
6085 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6086 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6090 } else if (cmethod->klass == mono_defaults.monitor_class) {
6091 gboolean is_enter = FALSE;
6092 gboolean is_v4 = FALSE;
6094 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6098 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6103 * To make async stack traces work, icalls which can block should have a wrapper.
6104 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6106 MonoBasicBlock *end_bb;
6108 NEW_BBLOCK (cfg, end_bb);
6110 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6111 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6112 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6113 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6114 MONO_START_BB (cfg, end_bb);
6117 } else if (cmethod->klass == mono_defaults.thread_class) {
6118 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6119 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6120 MONO_ADD_INS (cfg->cbb, ins);
6122 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6123 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6124 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6126 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6128 if (fsig->params [0]->type == MONO_TYPE_I1)
6129 opcode = OP_LOADI1_MEMBASE;
6130 else if (fsig->params [0]->type == MONO_TYPE_U1)
6131 opcode = OP_LOADU1_MEMBASE;
6132 else if (fsig->params [0]->type == MONO_TYPE_I2)
6133 opcode = OP_LOADI2_MEMBASE;
6134 else if (fsig->params [0]->type == MONO_TYPE_U2)
6135 opcode = OP_LOADU2_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_I4)
6137 opcode = OP_LOADI4_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_U4)
6139 opcode = OP_LOADU4_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6141 opcode = OP_LOADI8_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_R4)
6143 opcode = OP_LOADR4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_R8)
6145 opcode = OP_LOADR8_MEMBASE;
6146 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6147 opcode = OP_LOAD_MEMBASE;
6150 MONO_INST_NEW (cfg, ins, opcode);
6151 ins->inst_basereg = args [0]->dreg;
6152 ins->inst_offset = 0;
6153 MONO_ADD_INS (cfg->cbb, ins);
6155 switch (fsig->params [0]->type) {
6162 ins->dreg = mono_alloc_ireg (cfg);
6163 ins->type = STACK_I4;
6167 ins->dreg = mono_alloc_lreg (cfg);
6168 ins->type = STACK_I8;
6172 ins->dreg = mono_alloc_ireg (cfg);
6173 #if SIZEOF_REGISTER == 8
6174 ins->type = STACK_I8;
6176 ins->type = STACK_I4;
6181 ins->dreg = mono_alloc_freg (cfg);
6182 ins->type = STACK_R8;
6185 g_assert (mini_type_is_reference (fsig->params [0]));
6186 ins->dreg = mono_alloc_ireg_ref (cfg);
6187 ins->type = STACK_OBJ;
6191 if (opcode == OP_LOADI8_MEMBASE)
6192 ins = mono_decompose_opcode (cfg, ins);
6194 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6198 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6200 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6202 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6203 opcode = OP_STOREI1_MEMBASE_REG;
6204 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6205 opcode = OP_STOREI2_MEMBASE_REG;
6206 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6207 opcode = OP_STOREI4_MEMBASE_REG;
6208 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6209 opcode = OP_STOREI8_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_R4)
6211 opcode = OP_STORER4_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_R8)
6213 opcode = OP_STORER8_MEMBASE_REG;
6214 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6215 opcode = OP_STORE_MEMBASE_REG;
6218 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6220 MONO_INST_NEW (cfg, ins, opcode);
6221 ins->sreg1 = args [1]->dreg;
6222 ins->inst_destbasereg = args [0]->dreg;
6223 ins->inst_offset = 0;
6224 MONO_ADD_INS (cfg->cbb, ins);
6226 if (opcode == OP_STOREI8_MEMBASE_REG)
6227 ins = mono_decompose_opcode (cfg, ins);
6232 } else if (cmethod->klass->image == mono_defaults.corlib &&
6233 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6234 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6237 #if SIZEOF_REGISTER == 8
6238 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6239 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6240 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6241 ins->dreg = mono_alloc_preg (cfg);
6242 ins->sreg1 = args [0]->dreg;
6243 ins->type = STACK_I8;
6244 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6245 MONO_ADD_INS (cfg->cbb, ins);
6249 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6251 /* 64 bit reads are already atomic */
6252 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6253 load_ins->dreg = mono_alloc_preg (cfg);
6254 load_ins->inst_basereg = args [0]->dreg;
6255 load_ins->inst_offset = 0;
6256 load_ins->type = STACK_I8;
6257 MONO_ADD_INS (cfg->cbb, load_ins);
6259 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6266 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6267 MonoInst *ins_iconst;
6270 if (fsig->params [0]->type == MONO_TYPE_I4) {
6271 opcode = OP_ATOMIC_ADD_I4;
6272 cfg->has_atomic_add_i4 = TRUE;
6274 #if SIZEOF_REGISTER == 8
6275 else if (fsig->params [0]->type == MONO_TYPE_I8)
6276 opcode = OP_ATOMIC_ADD_I8;
6279 if (!mono_arch_opcode_supported (opcode))
6281 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6282 ins_iconst->inst_c0 = 1;
6283 ins_iconst->dreg = mono_alloc_ireg (cfg);
6284 MONO_ADD_INS (cfg->cbb, ins_iconst);
6286 MONO_INST_NEW (cfg, ins, opcode);
6287 ins->dreg = mono_alloc_ireg (cfg);
6288 ins->inst_basereg = args [0]->dreg;
6289 ins->inst_offset = 0;
6290 ins->sreg2 = ins_iconst->dreg;
6291 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6292 MONO_ADD_INS (cfg->cbb, ins);
6294 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6295 MonoInst *ins_iconst;
6298 if (fsig->params [0]->type == MONO_TYPE_I4) {
6299 opcode = OP_ATOMIC_ADD_I4;
6300 cfg->has_atomic_add_i4 = TRUE;
6302 #if SIZEOF_REGISTER == 8
6303 else if (fsig->params [0]->type == MONO_TYPE_I8)
6304 opcode = OP_ATOMIC_ADD_I8;
6307 if (!mono_arch_opcode_supported (opcode))
6309 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6310 ins_iconst->inst_c0 = -1;
6311 ins_iconst->dreg = mono_alloc_ireg (cfg);
6312 MONO_ADD_INS (cfg->cbb, ins_iconst);
6314 MONO_INST_NEW (cfg, ins, opcode);
6315 ins->dreg = mono_alloc_ireg (cfg);
6316 ins->inst_basereg = args [0]->dreg;
6317 ins->inst_offset = 0;
6318 ins->sreg2 = ins_iconst->dreg;
6319 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6320 MONO_ADD_INS (cfg->cbb, ins);
6322 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6325 if (fsig->params [0]->type == MONO_TYPE_I4) {
6326 opcode = OP_ATOMIC_ADD_I4;
6327 cfg->has_atomic_add_i4 = TRUE;
6329 #if SIZEOF_REGISTER == 8
6330 else if (fsig->params [0]->type == MONO_TYPE_I8)
6331 opcode = OP_ATOMIC_ADD_I8;
6334 if (!mono_arch_opcode_supported (opcode))
6336 MONO_INST_NEW (cfg, ins, opcode);
6337 ins->dreg = mono_alloc_ireg (cfg);
6338 ins->inst_basereg = args [0]->dreg;
6339 ins->inst_offset = 0;
6340 ins->sreg2 = args [1]->dreg;
6341 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6342 MONO_ADD_INS (cfg->cbb, ins);
6345 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6346 MonoInst *f2i = NULL, *i2f;
6347 guint32 opcode, f2i_opcode, i2f_opcode;
6348 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6349 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6351 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6352 fsig->params [0]->type == MONO_TYPE_R4) {
6353 opcode = OP_ATOMIC_EXCHANGE_I4;
6354 f2i_opcode = OP_MOVE_F_TO_I4;
6355 i2f_opcode = OP_MOVE_I4_TO_F;
6356 cfg->has_atomic_exchange_i4 = TRUE;
6358 #if SIZEOF_REGISTER == 8
6360 fsig->params [0]->type == MONO_TYPE_I8 ||
6361 fsig->params [0]->type == MONO_TYPE_R8 ||
6362 fsig->params [0]->type == MONO_TYPE_I) {
6363 opcode = OP_ATOMIC_EXCHANGE_I8;
6364 f2i_opcode = OP_MOVE_F_TO_I8;
6365 i2f_opcode = OP_MOVE_I8_TO_F;
6368 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6369 opcode = OP_ATOMIC_EXCHANGE_I4;
6370 cfg->has_atomic_exchange_i4 = TRUE;
6376 if (!mono_arch_opcode_supported (opcode))
6380 /* TODO: Decompose these opcodes instead of bailing here. */
6381 if (COMPILE_SOFT_FLOAT (cfg))
6384 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6385 f2i->dreg = mono_alloc_ireg (cfg);
6386 f2i->sreg1 = args [1]->dreg;
6387 if (f2i_opcode == OP_MOVE_F_TO_I4)
6388 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6389 MONO_ADD_INS (cfg->cbb, f2i);
6392 MONO_INST_NEW (cfg, ins, opcode);
6393 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6394 ins->inst_basereg = args [0]->dreg;
6395 ins->inst_offset = 0;
6396 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6397 MONO_ADD_INS (cfg->cbb, ins);
6399 switch (fsig->params [0]->type) {
6401 ins->type = STACK_I4;
6404 ins->type = STACK_I8;
6407 #if SIZEOF_REGISTER == 8
6408 ins->type = STACK_I8;
6410 ins->type = STACK_I4;
6415 ins->type = STACK_R8;
6418 g_assert (mini_type_is_reference (fsig->params [0]));
6419 ins->type = STACK_OBJ;
6424 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6425 i2f->dreg = mono_alloc_freg (cfg);
6426 i2f->sreg1 = ins->dreg;
6427 i2f->type = STACK_R8;
6428 if (i2f_opcode == OP_MOVE_I4_TO_F)
6429 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6430 MONO_ADD_INS (cfg->cbb, i2f);
6435 if (cfg->gen_write_barriers && is_ref)
6436 emit_write_barrier (cfg, args [0], args [1]);
6438 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6439 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6440 guint32 opcode, f2i_opcode, i2f_opcode;
6441 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6442 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6444 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6445 fsig->params [1]->type == MONO_TYPE_R4) {
6446 opcode = OP_ATOMIC_CAS_I4;
6447 f2i_opcode = OP_MOVE_F_TO_I4;
6448 i2f_opcode = OP_MOVE_I4_TO_F;
6449 cfg->has_atomic_cas_i4 = TRUE;
6451 #if SIZEOF_REGISTER == 8
6453 fsig->params [1]->type == MONO_TYPE_I8 ||
6454 fsig->params [1]->type == MONO_TYPE_R8 ||
6455 fsig->params [1]->type == MONO_TYPE_I) {
6456 opcode = OP_ATOMIC_CAS_I8;
6457 f2i_opcode = OP_MOVE_F_TO_I8;
6458 i2f_opcode = OP_MOVE_I8_TO_F;
6461 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6462 opcode = OP_ATOMIC_CAS_I4;
6463 cfg->has_atomic_cas_i4 = TRUE;
6469 if (!mono_arch_opcode_supported (opcode))
6473 /* TODO: Decompose these opcodes instead of bailing here. */
6474 if (COMPILE_SOFT_FLOAT (cfg))
6477 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6478 f2i_new->dreg = mono_alloc_ireg (cfg);
6479 f2i_new->sreg1 = args [1]->dreg;
6480 if (f2i_opcode == OP_MOVE_F_TO_I4)
6481 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6482 MONO_ADD_INS (cfg->cbb, f2i_new);
6484 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6485 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6486 f2i_cmp->sreg1 = args [2]->dreg;
6487 if (f2i_opcode == OP_MOVE_F_TO_I4)
6488 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6489 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6492 MONO_INST_NEW (cfg, ins, opcode);
6493 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6494 ins->sreg1 = args [0]->dreg;
6495 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6496 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6497 MONO_ADD_INS (cfg->cbb, ins);
6499 switch (fsig->params [1]->type) {
6501 ins->type = STACK_I4;
6504 ins->type = STACK_I8;
6507 #if SIZEOF_REGISTER == 8
6508 ins->type = STACK_I8;
6510 ins->type = STACK_I4;
6514 ins->type = cfg->r4_stack_type;
6517 ins->type = STACK_R8;
6520 g_assert (mini_type_is_reference (fsig->params [1]));
6521 ins->type = STACK_OBJ;
6526 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6527 i2f->dreg = mono_alloc_freg (cfg);
6528 i2f->sreg1 = ins->dreg;
6529 i2f->type = STACK_R8;
6530 if (i2f_opcode == OP_MOVE_I4_TO_F)
6531 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6532 MONO_ADD_INS (cfg->cbb, i2f);
6537 if (cfg->gen_write_barriers && is_ref)
6538 emit_write_barrier (cfg, args [0], args [1]);
6540 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6541 fsig->params [1]->type == MONO_TYPE_I4) {
6542 MonoInst *cmp, *ceq;
6544 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6547 /* int32 r = CAS (location, value, comparand); */
6548 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6549 ins->dreg = alloc_ireg (cfg);
6550 ins->sreg1 = args [0]->dreg;
6551 ins->sreg2 = args [1]->dreg;
6552 ins->sreg3 = args [2]->dreg;
6553 ins->type = STACK_I4;
6554 MONO_ADD_INS (cfg->cbb, ins);
6556 /* bool result = r == comparand; */
6557 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6558 cmp->sreg1 = ins->dreg;
6559 cmp->sreg2 = args [2]->dreg;
6560 cmp->type = STACK_I4;
6561 MONO_ADD_INS (cfg->cbb, cmp);
6563 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6564 ceq->dreg = alloc_ireg (cfg);
6565 ceq->type = STACK_I4;
6566 MONO_ADD_INS (cfg->cbb, ceq);
6568 /* *success = result; */
6569 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6571 cfg->has_atomic_cas_i4 = TRUE;
6573 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6574 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6578 } else if (cmethod->klass->image == mono_defaults.corlib &&
6579 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6580 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6583 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6585 MonoType *t = fsig->params [0];
6587 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6589 g_assert (t->byref);
6590 /* t is a byref type, so the reference check is more complicated */
6591 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6592 if (t->type == MONO_TYPE_I1)
6593 opcode = OP_ATOMIC_LOAD_I1;
6594 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6595 opcode = OP_ATOMIC_LOAD_U1;
6596 else if (t->type == MONO_TYPE_I2)
6597 opcode = OP_ATOMIC_LOAD_I2;
6598 else if (t->type == MONO_TYPE_U2)
6599 opcode = OP_ATOMIC_LOAD_U2;
6600 else if (t->type == MONO_TYPE_I4)
6601 opcode = OP_ATOMIC_LOAD_I4;
6602 else if (t->type == MONO_TYPE_U4)
6603 opcode = OP_ATOMIC_LOAD_U4;
6604 else if (t->type == MONO_TYPE_R4)
6605 opcode = OP_ATOMIC_LOAD_R4;
6606 else if (t->type == MONO_TYPE_R8)
6607 opcode = OP_ATOMIC_LOAD_R8;
6608 #if SIZEOF_REGISTER == 8
6609 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6610 opcode = OP_ATOMIC_LOAD_I8;
6611 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6612 opcode = OP_ATOMIC_LOAD_U8;
6614 else if (t->type == MONO_TYPE_I)
6615 opcode = OP_ATOMIC_LOAD_I4;
6616 else if (is_ref || t->type == MONO_TYPE_U)
6617 opcode = OP_ATOMIC_LOAD_U4;
6621 if (!mono_arch_opcode_supported (opcode))
6624 MONO_INST_NEW (cfg, ins, opcode);
6625 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6626 ins->sreg1 = args [0]->dreg;
6627 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6628 MONO_ADD_INS (cfg->cbb, ins);
6631 case MONO_TYPE_BOOLEAN:
6638 ins->type = STACK_I4;
6642 ins->type = STACK_I8;
6646 #if SIZEOF_REGISTER == 8
6647 ins->type = STACK_I8;
6649 ins->type = STACK_I4;
6653 ins->type = cfg->r4_stack_type;
6656 ins->type = STACK_R8;
6660 ins->type = STACK_OBJ;
6666 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6668 MonoType *t = fsig->params [0];
6671 g_assert (t->byref);
6672 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6673 if (t->type == MONO_TYPE_I1)
6674 opcode = OP_ATOMIC_STORE_I1;
6675 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6676 opcode = OP_ATOMIC_STORE_U1;
6677 else if (t->type == MONO_TYPE_I2)
6678 opcode = OP_ATOMIC_STORE_I2;
6679 else if (t->type == MONO_TYPE_U2)
6680 opcode = OP_ATOMIC_STORE_U2;
6681 else if (t->type == MONO_TYPE_I4)
6682 opcode = OP_ATOMIC_STORE_I4;
6683 else if (t->type == MONO_TYPE_U4)
6684 opcode = OP_ATOMIC_STORE_U4;
6685 else if (t->type == MONO_TYPE_R4)
6686 opcode = OP_ATOMIC_STORE_R4;
6687 else if (t->type == MONO_TYPE_R8)
6688 opcode = OP_ATOMIC_STORE_R8;
6689 #if SIZEOF_REGISTER == 8
6690 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6691 opcode = OP_ATOMIC_STORE_I8;
6692 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6693 opcode = OP_ATOMIC_STORE_U8;
6695 else if (t->type == MONO_TYPE_I)
6696 opcode = OP_ATOMIC_STORE_I4;
6697 else if (is_ref || t->type == MONO_TYPE_U)
6698 opcode = OP_ATOMIC_STORE_U4;
6702 if (!mono_arch_opcode_supported (opcode))
6705 MONO_INST_NEW (cfg, ins, opcode);
6706 ins->dreg = args [0]->dreg;
6707 ins->sreg1 = args [1]->dreg;
6708 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6709 MONO_ADD_INS (cfg->cbb, ins);
6711 if (cfg->gen_write_barriers && is_ref)
6712 emit_write_barrier (cfg, args [0], args [1]);
6718 } else if (cmethod->klass->image == mono_defaults.corlib &&
6719 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6720 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6721 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6722 if (should_insert_brekpoint (cfg->method)) {
6723 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6725 MONO_INST_NEW (cfg, ins, OP_NOP);
6726 MONO_ADD_INS (cfg->cbb, ins);
6730 } else if (cmethod->klass->image == mono_defaults.corlib &&
6731 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6732 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6733 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6735 EMIT_NEW_ICONST (cfg, ins, 1);
6737 EMIT_NEW_ICONST (cfg, ins, 0);
6740 } else if (cmethod->klass->image == mono_defaults.corlib &&
6741 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6742 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6743 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6744 /* No stack walks are currently available, so implement this as an intrinsic */
6745 MonoInst *assembly_ins;
6747 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6748 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6751 } else if (cmethod->klass->image == mono_defaults.corlib &&
6752 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6753 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6754 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6755 /* No stack walks are currently available, so implement this as an intrinsic */
6756 MonoInst *method_ins;
6757 MonoMethod *declaring = cfg->method;
6759 /* This returns the declaring generic method */
6760 if (declaring->is_inflated)
6761 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6762 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6763 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6764 cfg->no_inline = TRUE;
6765 if (cfg->method != cfg->current_method)
6766 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6769 } else if (cmethod->klass == mono_defaults.math_class) {
6771 * There is general branchless code for Min/Max, but it does not work for
6773 * http://everything2.com/?node_id=1051618
6775 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6776 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6777 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6778 !strcmp (cmethod->klass->name, "Selector")) ||
6779 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6780 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6781 !strcmp (cmethod->klass->name, "Selector"))
6783 if (cfg->backend->have_objc_get_selector &&
6784 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6785 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6786 cfg->compile_aot && !cfg->llvm_only) {
6788 MonoJumpInfoToken *ji;
6793 cfg->exception_message = g_strdup ("GetHandle");
6794 cfg->disable_llvm = TRUE;
6796 if (args [0]->opcode == OP_GOT_ENTRY) {
6797 pi = (MonoInst *)args [0]->inst_p1;
6798 g_assert (pi->opcode == OP_PATCH_INFO);
6799 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6800 ji = (MonoJumpInfoToken *)pi->inst_p0;
6802 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6803 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6806 NULLIFY_INS (args [0]);
6809 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6810 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6811 ins->dreg = mono_alloc_ireg (cfg);
6813 ins->inst_p0 = mono_string_to_utf8 (s);
6814 MONO_ADD_INS (cfg->cbb, ins);
6819 #ifdef MONO_ARCH_SIMD_INTRINSICS
6820 if (cfg->opt & MONO_OPT_SIMD) {
6821 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6827 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6831 if (COMPILE_LLVM (cfg)) {
6832 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6837 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6841 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime-internal methods to faster
 * equivalents.  Currently only String.InternalAllocateStr is handled: it is
 * replaced with a direct call to the GC's managed string allocator when
 * allocation profiling and shared (generic) code are both off.
 * Returns the emitted call instruction, or falls through (the visible chunk
 * is elided here) when no redirection applies.
 */
6844 inline static MonoInst*
6845 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6846 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6848 if (method->klass == mono_defaults.string_class) {
6849 /* managed string allocation support */
6850 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6851 MonoInst *iargs [2];
6852 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6853 MonoMethod *managed_alloc = NULL;
6855 g_assert (vtable); /* Should not fail since it is System.String */
6856 #ifndef MONO_CROSS_COMPILE
/* The managed allocator is unavailable when cross-compiling, so keep NULL there. */
6857 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* args: [0] = vtable of System.String, [1] = requested length (original arg 0) */
6861 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6862 iargs [1] = args [0];
6863 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument of the method being
 * inlined (including the implicit 'this') and store the corresponding stack
 * slot into it, so the inlined body can reference arguments through
 * cfg->args [] like a normally-compiled method.
 */
6870 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6872 MonoInst *store, *temp;
6875 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the declared type is not in sig->params; derive it from the stack. */
6876 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6879 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6880 * would be different than the MonoInst's used to represent arguments, and
6881 * the ldelema implementation can't deal with that.
6882 * Solution: When ldelema is used on an inline argument, create a var for
6883 * it, emit ldelema on that var, and emit the saving code below in
6884 * inline_method () if needed.
6886 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6887 cfg->args [i] = temp;
6888 /* This uses cfg->args [i] which is set by the preceding line */
6889 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6890 store->cil_code = sp [0]->cil_code;
6895 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6896 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6898 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD if its full name
 * matches the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The limit string is read once and cached in a
 * function-local static.
 */
6900 check_inline_called_method_name_limit (MonoMethod *called_method)
6903 static const char *limit = NULL;
6905 if (limit == NULL) {
6906 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6908 if (limit_string != NULL)
6909 limit = limit_string;
6914 if (limit [0] != '\0') {
6915 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) characters are checked. */
6917 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6918 g_free (called_method_name);
6920 //return (strncmp_result <= 0);
6921 return (strncmp_result == 0);
6928 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining into CALLER_METHOD if its full name
 * matches the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ().
 */
6930 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6933 static const char *limit = NULL;
6935 if (limit == NULL) {
6936 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6937 if (limit_string != NULL) {
6938 limit = limit_string;
6944 if (limit [0] != '\0') {
6945 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the cached limit string. */
6947 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6948 g_free (caller_method_name);
6950 //return (strncmp_result <= 0);
6951 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing the vreg DREG to the zero value appropriate for
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats, and
 * VZERO for value types.  Used to give the inline return variable a defined
 * value on paths that never set it (e.g. bodies that only throw).
 */
6959 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Statics so OP_R4CONST/OP_R8CONST can point at a stable address. */
6961 static double r8_0 = 0.0;
6962 static float r4_0 = 0.0;
6966 rtype = mini_get_underlying_type (rtype);
6970 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6971 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6972 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6973 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6974 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6975 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay single precision in their own stack type. */
6976 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6977 ins->type = STACK_R4;
6978 ins->inst_p0 = (void*)&r4_0;
6980 MONO_ADD_INS (cfg->cbb, ins);
6981 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6982 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6983 ins->type = STACK_R8;
6984 ins->inst_p0 = (void*)&r8_0;
6986 MONO_ADD_INS (cfg->cbb, ins);
6987 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6988 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6989 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6990 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6991 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (references etc.) is initialized to NULL. */
6993 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder instructions,
 * which keep the IR valid (every vreg has a def) without generating real
 * initialization code.  Falls back to emit_init_rvar () for types with no
 * dummy opcode.
 */
6998 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7002 rtype = mini_get_underlying_type (rtype);
7006 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7007 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7008 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7009 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7010 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7011 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7012 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7013 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7014 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7015 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7016 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7017 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7018 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7019 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type — emit a real initialization instead. */
7021 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize IL local number LOCAL of type TYPE.  If INIT is FALSE, emit
 * dummy initialization statements to keep the IR valid instead of real
 * zeroing code (used when the method declares locals as not zero-init).
 */
7027 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7029 MonoInst *var = cfg->locals [local];
7030 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Under soft-float, init a scratch reg then store it into the local. */
7032 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7033 emit_init_rvar (cfg, reg, type);
7034 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7037 emit_init_rvar (cfg, var->dreg, type);
7039 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  The caller's
 * relevant MonoCompile state is saved, the callee body is converted with a
 * recursive mono_method_to_ir () call between fresh start/end bblocks, and
 * the state is restored afterwards.  On success the new bblocks are linked
 * (and merged where possible) into the caller's CFG and the inlining cost
 * is returned; on failure the bblocks are discarded and the caller
 * continues with a normal call.
 * Return the cost of inlining CMETHOD.
 */
7049 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7050 guchar *ip, guint real_offset, gboolean inline_always)
7052 MonoInst *ins, *rvar = NULL;
7053 MonoMethodHeader *cheader;
7054 MonoBasicBlock *ebblock, *sbblock;
7056 MonoMethod *prev_inlined_method;
7057 MonoInst **prev_locals, **prev_args;
7058 MonoType **prev_arg_types;
7059 guint prev_real_offset;
7060 GHashTable *prev_cbb_hash;
7061 MonoBasicBlock **prev_cil_offset_to_bb;
7062 MonoBasicBlock *prev_cbb;
7063 unsigned char* prev_cil_start;
7064 guint32 prev_cil_offset_to_bb_len;
7065 MonoMethod *prev_current_method;
7066 MonoGenericContext *prev_generic_context;
7067 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7069 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters for debugging the inliner. */
7071 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7072 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7075 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7076 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7081 fsig = mono_method_signature (cmethod);
7083 if (cfg->verbose_level > 2)
7084 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7086 if (!cmethod->inline_info) {
7087 cfg->stat_inlineable_methods++;
7088 cmethod->inline_info = 1;
7091 /* allocate local variables */
7092 cheader = mono_method_get_header (cmethod);
7094 if (cheader == NULL || mono_loader_get_last_error ()) {
7096 mono_metadata_free_mh (cheader);
7097 if (inline_always && mono_loader_get_last_error ()) {
/* Mandatory inline failed to load its header: surface the loader error. */
7098 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7099 mono_error_set_from_loader_error (&cfg->error);
7102 mono_loader_clear_error ();
7106 /*Must verify before creating locals as it can cause the JIT to assert.*/
7107 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7108 mono_metadata_free_mh (cheader);
7112 /* allocate space to store the return value */
7113 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7114 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7117 prev_locals = cfg->locals;
7118 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7119 for (i = 0; i < cheader->num_locals; ++i)
7120 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7122 /* allocate start and end blocks */
7123 /* This is needed so if the inline is aborted, we can clean up */
7124 NEW_BBLOCK (cfg, sbblock);
7125 sbblock->real_offset = real_offset;
7127 NEW_BBLOCK (cfg, ebblock);
7128 ebblock->block_num = cfg->num_bblocks++;
7129 ebblock->real_offset = real_offset;
/* Save caller compilation state so it can be restored after the recursion. */
7131 prev_args = cfg->args;
7132 prev_arg_types = cfg->arg_types;
7133 prev_inlined_method = cfg->inlined_method;
7134 cfg->inlined_method = cmethod;
7135 cfg->ret_var_set = FALSE;
7136 cfg->inline_depth ++;
7137 prev_real_offset = cfg->real_offset;
7138 prev_cbb_hash = cfg->cbb_hash;
7139 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7140 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7141 prev_cil_start = cfg->cil_start;
7142 prev_cbb = cfg->cbb;
7143 prev_current_method = cfg->current_method;
7144 prev_generic_context = cfg->generic_context;
7145 prev_ret_var_set = cfg->ret_var_set;
7146 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method still needs the null check of 'this'. */
7148 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7151 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7153 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of whether the inline succeeded. */
7155 cfg->inlined_method = prev_inlined_method;
7156 cfg->real_offset = prev_real_offset;
7157 cfg->cbb_hash = prev_cbb_hash;
7158 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7159 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7160 cfg->cil_start = prev_cil_start;
7161 cfg->locals = prev_locals;
7162 cfg->args = prev_args;
7163 cfg->arg_types = prev_arg_types;
7164 cfg->current_method = prev_current_method;
7165 cfg->generic_context = prev_generic_context;
7166 cfg->ret_var_set = prev_ret_var_set;
7167 cfg->disable_inline = prev_disable_inline;
7168 cfg->inline_depth --;
/* Accept if cheap enough, forced, or marked AggressiveInlining. */
7170 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7171 if (cfg->verbose_level > 2)
7172 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7174 cfg->stat_inlined_methods++;
7176 /* always add some code to avoid block split failures */
7177 MONO_INST_NEW (cfg, ins, OP_NOP);
7178 MONO_ADD_INS (prev_cbb, ins);
7180 prev_cbb->next_bb = sbblock;
7181 link_bblock (cfg, prev_cbb, sbblock);
7184 * Get rid of the begin and end bblocks if possible to aid local
7187 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7189 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7190 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7192 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7193 MonoBasicBlock *prev = ebblock->in_bb [0];
7195 if (prev->next_bb == ebblock) {
7196 mono_merge_basic_blocks (cfg, prev, ebblock);
7198 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7199 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7200 cfg->cbb = prev_cbb;
7203 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7208 * It's possible that the rvar is set in some prev bblock, but not in others.
7214 for (i = 0; i < ebblock->in_count; ++i) {
7215 bb = ebblock->in_bb [i];
7217 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7220 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7230 * If the inlined method contains only a throw, then the ret var is not
7231 * set, so set it to a dummy value.
7234 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7236 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7239 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: discard the new bblocks and clear any pending error. */
7242 if (cfg->verbose_level > 2)
7243 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7244 cfg->exception_type = MONO_EXCEPTION_NONE;
7245 mono_loader_clear_error ();
7247 /* This gets rid of the newly added bblocks */
7248 cfg->cbb = prev_cbb;
7250 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7255 * Some of these comments may well be out-of-date.
7256 * Design decisions: we do a single pass over the IL code (and we do bblock
7257 * splitting/merging in the few cases when it's required: a back jump to an IL
7258 * address that was not already seen as bblock starting point).
7259 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7260 * Complex operations are decomposed in simpler ones right away. We need to let the
7261 * arch-specific code peek and poke inside this process somehow (except when the
7262 * optimizations can take advantage of the full semantic info of coarse opcodes).
7263 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7264 * MonoInst->opcode initially is the IL opcode or some simplification of that
7265 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7266 * opcode with value bigger than OP_LAST.
7267 * At this point the IR can be handed over to an interpreter, a dumb code generator
7268 * or to the optimizing code generator that will translate it to SSA form.
7270 * Profiling directed optimizations.
7271 * We may compile by default with few or no optimizations and instrument the code
7272 * or the user may indicate what methods to optimize the most either in a config file
7273 * or through repeated runs where the compiler applies offline the optimizations to
7274 * each method and then decides if it was worth it.
/*
 * Verification helpers used by mono_method_to_ir (): each check triggers the
 * UNVERIFIED (or TYPE_LOAD_ERROR) bail-out path when the IL being decoded is
 * out of bounds, over/underflows the evaluation stack, or references an
 * invalid argument/local/type.
 */
7277 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7278 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7279 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7280 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7281 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7282 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7283 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7284 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7286 /* offset from br.s -> br like opcodes */
7287 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to basic block BB, i.e.
 * no other bblock starts at that offset in the cil_offset_to_bb map.
 */
7290 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7292 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7294 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode, and for
 * every branch/switch target (and fall-through successor) create the
 * corresponding MonoBasicBlock via GET_BBLOCK so the main conversion pass
 * knows all bblock start offsets.  Also marks the bblock containing a
 * 'throw' as out-of-line so it is laid out cold.
 */
7298 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7300 unsigned char *ip = start;
7301 unsigned char *target;
7304 MonoBasicBlock *bblock;
7305 const MonoOpcode *opcode;
7308 cli_addr = ip - start;
7309 i = mono_opcode_value ((const guint8 **)&ip, end);
7312 opcode = &mono_opcodes [i];
/* Advance past the opcode's inline argument; only branch-like arguments create bblocks. */
7313 switch (opcode->argument) {
7314 case MonoInlineNone:
7317 case MonoInlineString:
7318 case MonoInlineType:
7319 case MonoInlineField:
7320 case MonoInlineMethod:
7323 case MonoShortInlineR:
7330 case MonoShortInlineVar:
7331 case MonoShortInlineI:
7334 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
7335 target = start + cli_addr + 2 + (signed char)ip [1];
7336 GET_BBLOCK (cfg, bblock, target);
7339 GET_BBLOCK (cfg, bblock, ip);
7341 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
7342 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7343 GET_BBLOCK (cfg, bblock, target);
7346 GET_BBLOCK (cfg, bblock, ip);
7348 case MonoInlineSwitch: {
7349 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole table (5 + 4*n bytes). */
7352 cli_addr += 5 + 4 * n;
7353 target = start + cli_addr;
7354 GET_BBLOCK (cfg, bblock, target);
7356 for (j = 0; j < n; ++j) {
7357 target = start + cli_addr + (gint32)read32 (ip);
7358 GET_BBLOCK (cfg, bblock, target);
7368 g_assert_not_reached ();
7371 if (i == CEE_THROW) {
7372 unsigned char *bb_start = ip - 1;
7374 /* Find the start of the bblock containing the throw */
7376 while ((bb_start >= start) && !bblock) {
7377 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7381 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing the
 * result to contain open generic type parameters.  For wrapper methods the
 * token indexes the wrapper's own data table instead of metadata.
 */
7391 static inline MonoMethod *
7392 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7396 mono_error_init (error);
7398 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7399 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7401 method = mono_class_inflate_generic_method_checked (method, context, error);
7404 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but rejects methods with open
 * constructed types unless the compilation is gshared; errors are recorded
 * in cfg->error when CFG is available.
 */
7410 static inline MonoMethod *
7411 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7414 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7416 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7417 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
/* With no cfg the local MonoError would leak its state — clean it up. */
7421 if (!method && !cfg)
7422 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT; wrapper methods resolve through their wrapper data table.  The
 * class is initialized before being returned.
 */
7427 static inline MonoClass*
7428 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7433 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7434 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7436 klass = mono_class_inflate_generic_class (klass, context);
7438 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7439 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7442 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature (used for calli); wrapper methods
 * resolve through their wrapper data table, others parse metadata.  The
 * signature is inflated with CONTEXT when needed.
 */
7446 static inline MonoMethodSignature*
7447 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7449 MonoMethodSignature *fsig;
7451 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7452 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7454 fsig = mono_metadata_parse_signature (method->klass->image, token);
7458 fsig = mono_inflate_generic_signature(fsig, context, &error);
7460 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (lazily looking up and caching) the managed
 * SecurityManager.ThrowException (exception) method used to raise CoreCLR
 * security exceptions from JITted code.
 */
7466 throw_exception (void)
7468 static MonoMethod *method = NULL;
7471 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7472 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () passing the
 * pre-created exception object EX as a pointer constant.
 */
7479 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7481 MonoMethod *thrower = throw_exception ();
7484 EMIT_NEW_PCONST (cfg, args [0], ex);
7485 mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
7493 get_original_method (MonoMethod *method)
7495 if (method->wrapper_type == MONO_WRAPPER_NONE)
7498 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7499 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7502 /* in other cases we need to find the original method */
7503 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped first, since wrappers carry
 * no attributes) may not access FIELD, emit code throwing the security
 * exception at the current emission point.
 */
7507 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7509 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7510 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7512 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped first) may not call CALLEE,
 * emit code throwing the security exception at the current emission point.
 */
7516 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7518 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7519 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7521 emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
7529 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/*
 * The pattern recognized here is:
 * newarr[System.Int32]
 * dup
 * ldtoken field valuetype ...
 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 */
7537 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7539 guint32 token = read32 (ip + 7);
7540 guint32 field_token = read32 (ip + 2);
7541 guint32 field_index = field_token & 0xffffff;
7543 const char *data_ptr;
7545 MonoMethod *cmethod;
7546 MonoClass *dummy_class;
7547 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7551 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7555 *out_field_token = field_token;
7557 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only match the real RuntimeHelpers.InitializeArray from corlib. */
7560 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7562 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7563 case MONO_TYPE_BOOLEAN:
7567 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7568 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7569 case MONO_TYPE_CHAR:
/* The blob must be at least as large as len * element size. */
7586 if (size > mono_type_size (field->type, &dummy_align))
7589 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7590 if (!image_is_dynamic (method->klass->image)) {
7591 field_index = read32 (ip + 2) & 0xffffff;
7592 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7593 data_ptr = mono_image_rva_map (method->klass->image, rva);
7594 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7595 /* for aot code we do the lookup on load */
7596 if (aot && data_ptr)
7597 return (const char *)GUINT_TO_POINTER (rva);
7599 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7601 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP in METHOD, including a disassembly of the offending instruction.
 */
7609 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7611 char *method_fname = mono_method_full_name (method, TRUE);
7613 MonoMethodHeader *header = mono_method_get_header (method);
7615 if (header->code_size == 0)
7616 method_code = g_strdup ("method body is empty.");
7618 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7619 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7620 g_free (method_fname);
7621 g_free (method_code);
/* The header is mempool-tracked so it is freed with the compilation. */
7622 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit the store for IL 'stloc N' of the value on top of the stack (*sp).
 * If the value was just produced by an ICONST/I8CONST and a plain register
 * move would be used, retarget that instruction's dreg to the local instead
 * of emitting a separate store.
 */
7626 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7629 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7630 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7631 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7632 /* Optimize reg-reg moves away */
7634 * Can't optimize other opcodes, since sp[0] might point to
7635 * the last ins of a decomposed opcode.
7637 sp [0]->dreg = (cfg)->locals [n]->dreg;
7639 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
7647 static inline unsigned char *
7648 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7658 local = read16 (ip + 2);
/* Pattern: ldloca <n>; initobj <type> — replace with direct local init and
 * return the IP past the consumed initobj so the caller skips it. */
7662 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7663 /* From the INITOBJ case */
7664 token = read32 (ip + 2);
7665 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7666 CHECK_TYPELOAD (klass);
7667 type = mini_get_underlying_type (&klass->byval_arg);
7668 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call to CMETHOD in llvm-only mode, where vtable
 * and IMT slots hold function descriptors (addr + arg pairs) rather than raw
 * code addresses or trampolines. Four strategies are used, from fastest to
 * slowest:
 *   1. plain virtual call: load the descriptor from the vtable slot, with a
 *      lazy-init slow path through mono_init_vtable_slot ();
 *   2. simple (non-variant, non-generic) interface call through an IMT thunk;
 *   3. generic-virtual / variant-interface call through a dynamically grown
 *      IMT thunk, falling back to a resolver icall when the thunk misses;
 *   4. fully dynamic gsharedvt resolution via runtime icalls.
 * SP[0] is the receiver; returns the call instruction.
 */
7676 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7678 	MonoInst *icall_args [16];
7679 	MonoInst *call_target, *ins, *vtable_ins;
7680 	int arg_reg, this_reg, vtable_reg;
7681 	gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7682 	gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7683 	gboolean variant_iface = FALSE;
7688 	 * In llvm-only mode, vtables contain function descriptors instead of
7689 	 * method addresses/trampolines.
/* Explicit null check on the receiver; llvm-only code cannot rely on fault pages. */
7691 	MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces dispatch through IMT slots, classes through vtable slots. */
7694 		slot = mono_method_get_imt_slot (cmethod);
7696 		slot = mono_method_get_vtable_index (cmethod);
7698 	this_reg = sp [0]->dreg;
7700 	if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7701 		variant_iface = TRUE;
/* Case 1: plain (non-generic, non-interface, non-gsharedvt) virtual call. */
7703 	if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7705 		 * The simplest case, a normal virtual call.
7707 		int slot_reg = alloc_preg (cfg);
7708 		int addr_reg = alloc_preg (cfg);
7709 		int arg_reg = alloc_preg (cfg);
7710 		MonoBasicBlock *non_null_bb;
7712 		vtable_reg = alloc_preg (cfg);
7713 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7714 		offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7716 		/* Load the vtable slot, which contains a function descriptor. */
7717 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7719 		NEW_BBLOCK (cfg, non_null_bb);
/* Slot already initialized (non-null) is the expected fast path. */
7721 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7722 		cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7723 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7726 		// FIXME: Make the wrapper use the preserveall cconv
7727 		// FIXME: Use one icall per slot for small slot numbers ?
7728 		icall_args [0] = vtable_ins;
7729 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7730 		/* Make the icall return the vtable slot value to save some code space */
7731 		ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7732 		ins->dreg = slot_reg;
7733 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7736 		MONO_START_BB (cfg, non_null_bb);
7737 		/* Load the address + arg from the vtable slot */
7738 		EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7739 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7741 		return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple interface call through the IMT. */
7744 	if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7746 		 * A simple interface call
7748 		 * We make a call through an imt slot to obtain the function descriptor we need to call.
7749 		 * The imt slot contains a function descriptor for a runtime function + arg.
7751 		int slot_reg = alloc_preg (cfg);
7752 		int addr_reg = alloc_preg (cfg);
7753 		int arg_reg = alloc_preg (cfg);
7754 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7756 		vtable_reg = alloc_preg (cfg);
7757 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7758 		offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7761 		 * The slot is already initialized when the vtable is created so there is no need
7765 		/* Load the imt slot, which contains a function descriptor. */
7766 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7768 		/* Load the address + arg of the imt thunk from the imt slot */
7769 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7770 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7772 		 * IMT thunks in llvm-only mode are C functions which take an info argument
7773 		 * plus the imt method and return the ftndesc to call.
7775 		icall_args [0] = thunk_arg_ins;
7776 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7777 												cmethod, MONO_RGCTX_INFO_METHOD);
7778 		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7780 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual or variant interface call. */
7783 	if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7785 		 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7786 		 * dynamically extended as more instantiations are discovered.
7787 		 * This handles generic virtual methods both on classes and interfaces.
7789 		int slot_reg = alloc_preg (cfg);
7790 		int addr_reg = alloc_preg (cfg);
7791 		int arg_reg = alloc_preg (cfg);
7792 		int ftndesc_reg = alloc_preg (cfg);
7793 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7794 		MonoBasicBlock *slowpath_bb, *end_bb;
7796 		NEW_BBLOCK (cfg, slowpath_bb);
7797 		NEW_BBLOCK (cfg, end_bb);
7799 		vtable_reg = alloc_preg (cfg);
7800 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface: negative IMT offset; class: positive vtable slot offset. */
7802 			offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7804 			offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7806 		/* Load the slot, which contains a function descriptor. */
7807 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7809 		/* These slots are not initialized, so fall back to the slow path until they are initialized */
7810 		/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7811 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7812 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7815 		/* Same as with iface calls */
7816 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7817 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7818 		icall_args [0] = thunk_arg_ins;
7819 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7820 												cmethod, MONO_RGCTX_INFO_METHOD);
7821 		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7822 		ftndesc_ins->dreg = ftndesc_reg;
7824 		 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7825 		 * they don't know about yet. Fall back to the slowpath in that case.
7827 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7828 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7830 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7833 		MONO_START_BB (cfg, slowpath_bb);
7834 		icall_args [0] = vtable_ins;
7835 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7836 		icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7837 												cmethod, MONO_RGCTX_INFO_METHOD);
7839 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7841 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7842 		ftndesc_ins->dreg = ftndesc_reg;
7843 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7846 		MONO_START_BB (cfg, end_bb);
7847 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: gsharedvt — resolve everything at runtime. */
7851 	 * Non-optimized cases
7853 	icall_args [0] = sp [0];
7854 	EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7856 	icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7857 											cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver writes the extra call argument here. */
7859 	arg_reg = alloc_preg (cfg);
7860 	MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7861 	EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7863 		g_assert (is_gsharedvt);
7865 		call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7867 		call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7870 	 * Pass the extra argument even if the callee doesn't receive it, most
7871 	 * calling conventions allow this.
7873 	return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain (loop structure not fully visible in this view).
 */
7877 is_exception_class (MonoClass *klass)
7880 		if (klass == mono_defaults.exception_class)
7882 		klass = klass->parent;
7888  * is_jit_optimizer_disabled:
7890  *   Determine whenever M's assembly has a DebuggableAttribute with the
7891  * IsJITOptimizerDisabled flag set.
7894 is_jit_optimizer_disabled (MonoMethod *m)
7896 	MonoAssembly *ass = m->klass->image->assembly;
7897 	MonoCustomAttrInfo* attrs;
/* Cached across calls; DebuggableAttribute's class never changes. */
7898 	static MonoClass *klass;
7900 	gboolean val = FALSE;
/* Fast path: the per-assembly flag has already been computed. */
7903 	if (ass->jit_optimizer_disabled_inited)
7904 		return ass->jit_optimizer_disabled;
7907 		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Attribute class missing (or similar early-out): publish FALSE.
 * The barrier orders the value store before the inited flag store so
 * racing readers of the fast path above never see a stale value. */
7910 		ass->jit_optimizer_disabled = FALSE;
7911 		mono_memory_barrier ();
7912 		ass->jit_optimizer_disabled_inited = TRUE;
7916 	attrs = mono_custom_attrs_from_assembly (ass);
7918 		for (i = 0; i < attrs->num_attrs; ++i) {
7919 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
7921 			MonoMethodSignature *sig;
7923 			if (!attr->ctor || attr->ctor->klass != klass)
7925 			/* Decode the attribute. See reflection.c */
7926 			p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog required by ECMA-335. */
7927 			g_assert (read16 (p) == 0x0001);
7930 			// FIXME: Support named parameters
7931 			sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded. */
7932 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7934 			/* Two boolean arguments */
7938 		mono_custom_attrs_free (attrs);
/* Publish the computed result; barrier as above. */
7941 	ass->jit_optimizer_disabled = val;
7942 	mono_memory_barrier ();
7943 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD (signature FSIG,
 * original IL opcode CALL_OPCODE) can be emitted. Starts from the
 * architecture's verdict and then vetoes the call for anything that could
 * keep a pointer into the current frame alive, or that needs the frame
 * to survive (LMF, wrappers, non-CEE_CALL opcodes).
 */
7949 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7951 	gboolean supported_tail_call;
7954 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7956 	for (i = 0; i < fsig->param_count; ++i) {
7957 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7958 			/* These can point to the current method's stack */
7959 			supported_tail_call = FALSE;
7961 	if (fsig->hasthis && cmethod->klass->valuetype)
7962 		/* this might point to the current method's stack */
7963 		supported_tail_call = FALSE;
7964 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7965 		supported_tail_call = FALSE;
/* The LMF references the current frame, which a tail call would destroy. */
7966 	if (cfg->method->save_lmf)
7967 		supported_tail_call = FALSE;
7968 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7969 		supported_tail_call = FALSE;
7970 	if (call_opcode != CEE_CALL)
7971 		supported_tail_call = FALSE;
7973 	/* Debugging support */
/* mono_debug_count () lets developers bisect tail-call related bugs. */
7975 	if (supported_tail_call) {
7976 		if (!mono_debug_count ())
7977 			supported_tail_call = FALSE;
7981 	return supported_tail_call;
7987  * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ. SP holds the (already
 * allocated) object plus the ctor arguments, IP points at the NEWOBJ, and
 * INLINE_COSTS is updated when the ctor gets inlined. Depending on generic
 * sharing state the call is made with an rgctx/vtable extra argument,
 * through an indirect gsharedvt trampoline, or as a direct/inlined call.
 */
7990 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7991 				  MonoInst **sp, guint8 *ip, int *inline_costs)
7993 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an explicit rgctx/vtable argument. */
7995 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7996 			mono_method_is_generic_sharable (cmethod, TRUE)) {
7997 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7998 			mono_class_vtable (cfg->domain, cmethod->klass);
7999 			CHECK_TYPELOAD (cmethod->klass);
8001 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
8002 												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8005 				vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8006 												   cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8008 				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8010 				CHECK_TYPELOAD (cmethod->klass);
8011 				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8016 	/* Avoid virtual calls to ctors if possible */
8017 	if (mono_class_is_marshalbyref (cmethod->klass))
8018 		callvirt_this_arg = sp [0];
/* Try an intrinsic first, then inlining, before emitting a real call. */
8020 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8021 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8022 		CHECK_CFG_EXCEPTION;
8023 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8024 			   mono_method_check_inlining (cfg, cmethod) &&
8025 			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8028 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the NEWOBJ opcode + token being replaced by the inlined body. */
8029 			cfg->real_offset += 5;
8031 			*inline_costs += costs - 5;
8033 			INLINE_FAILURE ("inline failure");
8034 			// FIXME-VT: Clean this up
8035 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8036 				GSHAREDVT_FAILURE(*ip);
8037 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signatures cannot be called directly: go through an out trampoline. */
8039 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8042 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8044 		if (cfg->llvm_only) {
8045 			// FIXME: Avoid initializing vtable_arg
8046 			emit_llvmonly_calli (cfg, fsig, sp, addr);
8048 			mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8050 	} else if (context_used &&
8051 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8052 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8053 		MonoInst *cmethod_addr;
8055 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8057 		if (cfg->llvm_only) {
8058 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8059 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8060 			emit_llvmonly_calli (cfg, fsig, sp, addr);
8062 			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8063 												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8065 			mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: an ordinary direct call (counts as an inline failure). */
8068 		INLINE_FAILURE ("ctor call");
8069 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8070 										  callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR to return VAL from the current method. Valuetype returns
 * (CEE_STOBJ) are either stored into the return variable or written through
 * the hidden vret address; on soft-float targets an R4 return is first
 * converted via the mono_fload_r4_arg icall; everything else goes through
 * the architecture's setret hook.
 */
8077 emit_setret (MonoCompile *cfg, MonoInst *val)
8079 	MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
8082 	if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No vret_addr means the valuetype fits in the return variable itself. */
8085 		if (!cfg->vret_addr) {
8086 			EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
8088 			EMIT_NEW_RETLOADA (cfg, ret_addr);
8090 			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8091 			ins->klass = mono_class_from_mono_type (ret_type);
8094 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: the R4 value must be boxed through an icall before setret. */
8095 		if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8096 			MonoInst *iargs [1];
8100 			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8101 			mono_arch_emit_setret (cfg, cfg->method, conv);
8103 			mono_arch_emit_setret (cfg, cfg->method, val);
8106 		mono_arch_emit_setret (cfg, cfg->method, val);
8112 * mono_method_to_ir:
8114 * Translate the .net IL into linear IR.
8117 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8118 MonoInst *return_var, MonoInst **inline_args,
8119 guint inline_offset, gboolean is_virtual_call)
8122 MonoInst *ins, **sp, **stack_start;
8123 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8124 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8125 MonoMethod *cmethod, *method_definition;
8126 MonoInst **arg_array;
8127 MonoMethodHeader *header;
8129 guint32 token, ins_flag;
8131 MonoClass *constrained_class = NULL;
8132 unsigned char *ip, *end, *target, *err_pos;
8133 MonoMethodSignature *sig;
8134 MonoGenericContext *generic_context = NULL;
8135 MonoGenericContainer *generic_container = NULL;
8136 MonoType **param_types;
8137 int i, n, start_new_bblock, dreg;
8138 int num_calls = 0, inline_costs = 0;
8139 int breakpoint_id = 0;
8141 GSList *class_inits = NULL;
8142 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8144 gboolean init_locals, seq_points, skip_dead_blocks;
8145 gboolean sym_seq_points = FALSE;
8146 MonoDebugMethodInfo *minfo;
8147 MonoBitSet *seq_point_locs = NULL;
8148 MonoBitSet *seq_point_set_locs = NULL;
8150 cfg->disable_inline = is_jit_optimizer_disabled (method);
8152 /* serialization and xdomain stuff may need access to private fields and methods */
8153 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8154 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8155 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8156 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8157 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8158 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8160 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8161 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8162 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8163 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8164 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8166 image = method->klass->image;
8167 header = mono_method_get_header (method);
8169 if (mono_loader_get_last_error ()) {
8170 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8171 mono_error_set_from_loader_error (&cfg->error);
8173 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8175 goto exception_exit;
8177 generic_container = mono_method_get_generic_container (method);
8178 sig = mono_method_signature (method);
8179 num_args = sig->hasthis + sig->param_count;
8180 ip = (unsigned char*)header->code;
8181 cfg->cil_start = ip;
8182 end = ip + header->code_size;
8183 cfg->stat_cil_code_size += header->code_size;
8185 seq_points = cfg->gen_seq_points && cfg->method == method;
8187 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8188 /* We could hit a seq point before attaching to the JIT (#8338) */
8192 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8193 minfo = mono_debug_lookup_method (method);
8195 MonoSymSeqPoint *sps;
8196 int i, n_il_offsets;
8198 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8199 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8200 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8201 sym_seq_points = TRUE;
8202 for (i = 0; i < n_il_offsets; ++i) {
8203 if (sps [i].il_offset < header->code_size)
8204 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8207 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8208 /* Methods without line number info like auto-generated property accessors */
8209 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8210 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8211 sym_seq_points = TRUE;
8216 * Methods without init_locals set could cause asserts in various passes
8217 * (#497220). To work around this, we emit dummy initialization opcodes
8218 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8219 * on some platforms.
8221 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8222 init_locals = header->init_locals;
8226 method_definition = method;
8227 while (method_definition->is_inflated) {
8228 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8229 method_definition = imethod->declaring;
8232 /* SkipVerification is not allowed if core-clr is enabled */
8233 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8235 dont_verify_stloc = TRUE;
8238 if (sig->is_inflated)
8239 generic_context = mono_method_get_context (method);
8240 else if (generic_container)
8241 generic_context = &generic_container->context;
8242 cfg->generic_context = generic_context;
8245 g_assert (!sig->has_type_parameters);
8247 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8248 g_assert (method->is_inflated);
8249 g_assert (mono_method_get_context (method)->method_inst);
8251 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8252 g_assert (sig->generic_param_count);
8254 if (cfg->method == method) {
8255 cfg->real_offset = 0;
8257 cfg->real_offset = inline_offset;
8260 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8261 cfg->cil_offset_to_bb_len = header->code_size;
8263 cfg->current_method = method;
8265 if (cfg->verbose_level > 2)
8266 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8268 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8270 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8271 for (n = 0; n < sig->param_count; ++n)
8272 param_types [n + sig->hasthis] = sig->params [n];
8273 cfg->arg_types = param_types;
8275 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8276 if (cfg->method == method) {
8278 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8279 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8282 NEW_BBLOCK (cfg, start_bblock);
8283 cfg->bb_entry = start_bblock;
8284 start_bblock->cil_code = NULL;
8285 start_bblock->cil_length = 0;
8288 NEW_BBLOCK (cfg, end_bblock);
8289 cfg->bb_exit = end_bblock;
8290 end_bblock->cil_code = NULL;
8291 end_bblock->cil_length = 0;
8292 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8293 g_assert (cfg->num_bblocks == 2);
8295 arg_array = cfg->args;
8297 if (header->num_clauses) {
8298 cfg->spvars = g_hash_table_new (NULL, NULL);
8299 cfg->exvars = g_hash_table_new (NULL, NULL);
8301 /* handle exception clauses */
8302 for (i = 0; i < header->num_clauses; ++i) {
8303 MonoBasicBlock *try_bb;
8304 MonoExceptionClause *clause = &header->clauses [i];
8305 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8307 try_bb->real_offset = clause->try_offset;
8308 try_bb->try_start = TRUE;
8309 try_bb->region = ((i + 1) << 8) | clause->flags;
8310 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8311 tblock->real_offset = clause->handler_offset;
8312 tblock->flags |= BB_EXCEPTION_HANDLER;
8315 * Linking the try block with the EH block hinders inlining as we won't be able to
8316 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8318 if (COMPILE_LLVM (cfg))
8319 link_bblock (cfg, try_bb, tblock);
8321 if (*(ip + clause->handler_offset) == CEE_POP)
8322 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8324 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8325 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8326 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8327 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8328 MONO_ADD_INS (tblock, ins);
8330 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8331 /* finally clauses already have a seq point */
8332 /* seq points for filter clauses are emitted below */
8333 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8334 MONO_ADD_INS (tblock, ins);
8337 /* todo: is a fault block unsafe to optimize? */
8338 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8339 tblock->flags |= BB_EXCEPTION_UNSAFE;
8342 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8344 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8346 /* catch and filter blocks get the exception object on the stack */
8347 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8348 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8350 /* mostly like handle_stack_args (), but just sets the input args */
8351 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8352 tblock->in_scount = 1;
8353 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8354 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8358 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8359 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8360 if (!cfg->compile_llvm) {
8361 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8362 ins->dreg = tblock->in_stack [0]->dreg;
8363 MONO_ADD_INS (tblock, ins);
8366 MonoInst *dummy_use;
8369 * Add a dummy use for the exvar so its liveness info will be
8372 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8375 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8376 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8377 MONO_ADD_INS (tblock, ins);
8380 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8381 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8382 tblock->flags |= BB_EXCEPTION_HANDLER;
8383 tblock->real_offset = clause->data.filter_offset;
8384 tblock->in_scount = 1;
8385 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8386 /* The filter block shares the exvar with the handler block */
8387 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8388 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8389 MONO_ADD_INS (tblock, ins);
8393 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8394 clause->data.catch_class &&
8396 mono_class_check_context_used (clause->data.catch_class)) {
8398 * In shared generic code with catch
8399 * clauses containing type variables
8400 * the exception handling code has to
8401 * be able to get to the rgctx.
8402 * Therefore we have to make sure that
8403 * the vtable/mrgctx argument (for
8404 * static or generic methods) or the
8405 * "this" argument (for non-static
8406 * methods) are live.
8408 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8409 mini_method_get_context (method)->method_inst ||
8410 method->klass->valuetype) {
8411 mono_get_vtable_var (cfg);
8413 MonoInst *dummy_use;
8415 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8420 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8421 cfg->cbb = start_bblock;
8422 cfg->args = arg_array;
8423 mono_save_args (cfg, sig, inline_args);
8426 /* FIRST CODE BLOCK */
8427 NEW_BBLOCK (cfg, tblock);
8428 tblock->cil_code = ip;
8432 ADD_BBLOCK (cfg, tblock);
8434 if (cfg->method == method) {
8435 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8436 if (breakpoint_id) {
8437 MONO_INST_NEW (cfg, ins, OP_BREAK);
8438 MONO_ADD_INS (cfg->cbb, ins);
8442 /* we use a separate basic block for the initialization code */
8443 NEW_BBLOCK (cfg, init_localsbb);
8444 cfg->bb_init = init_localsbb;
8445 init_localsbb->real_offset = cfg->real_offset;
8446 start_bblock->next_bb = init_localsbb;
8447 init_localsbb->next_bb = cfg->cbb;
8448 link_bblock (cfg, start_bblock, init_localsbb);
8449 link_bblock (cfg, init_localsbb, cfg->cbb);
8451 cfg->cbb = init_localsbb;
8453 if (cfg->gsharedvt && cfg->method == method) {
8454 MonoGSharedVtMethodInfo *info;
8455 MonoInst *var, *locals_var;
8458 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8459 info->method = cfg->method;
8460 info->count_entries = 16;
8461 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8462 cfg->gsharedvt_info = info;
8464 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8465 /* prevent it from being register allocated */
8466 //var->flags |= MONO_INST_VOLATILE;
8467 cfg->gsharedvt_info_var = var;
8469 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8472 /* Allocate locals */
8473 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8474 /* prevent it from being register allocated */
8475 //locals_var->flags |= MONO_INST_VOLATILE;
8476 cfg->gsharedvt_locals_var = locals_var;
8478 dreg = alloc_ireg (cfg);
8479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8481 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8482 ins->dreg = locals_var->dreg;
8484 MONO_ADD_INS (cfg->cbb, ins);
8485 cfg->gsharedvt_locals_var_ins = ins;
8487 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8490 ins->flags |= MONO_INST_INIT;
8494 if (mono_security_core_clr_enabled ()) {
8495 /* check if this is native code, e.g. an icall or a p/invoke */
8496 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8497 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8499 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8500 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8502 /* if this ia a native call then it can only be JITted from platform code */
8503 if ((icall || pinvk) && method->klass && method->klass->image) {
8504 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8505 MonoException *ex = icall ? mono_get_exception_security () :
8506 mono_get_exception_method_access ();
8507 emit_throw_exception (cfg, ex);
8514 CHECK_CFG_EXCEPTION;
8516 if (header->code_size == 0)
8519 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8524 if (cfg->method == method)
8525 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8527 for (n = 0; n < header->num_locals; ++n) {
8528 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8533 /* We force the vtable variable here for all shared methods
8534 for the possibility that they might show up in a stack
8535 trace where their exact instantiation is needed. */
8536 if (cfg->gshared && method == cfg->method) {
8537 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8538 mini_method_get_context (method)->method_inst ||
8539 method->klass->valuetype) {
8540 mono_get_vtable_var (cfg);
8542 /* FIXME: Is there a better way to do this?
8543 We need the variable live for the duration
8544 of the whole method. */
8545 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8549 /* add a check for this != NULL to inlined methods */
8550 if (is_virtual_call) {
8553 NEW_ARGLOAD (cfg, arg_ins, 0);
8554 MONO_ADD_INS (cfg->cbb, arg_ins);
8555 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8558 skip_dead_blocks = !dont_verify;
8559 if (skip_dead_blocks) {
8560 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8565 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8566 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8569 start_new_bblock = 0;
8571 if (cfg->method == method)
8572 cfg->real_offset = ip - header->code;
8574 cfg->real_offset = inline_offset;
8579 if (start_new_bblock) {
8580 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8581 if (start_new_bblock == 2) {
8582 g_assert (ip == tblock->cil_code);
8584 GET_BBLOCK (cfg, tblock, ip);
8586 cfg->cbb->next_bb = tblock;
8588 start_new_bblock = 0;
8589 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8590 if (cfg->verbose_level > 3)
8591 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8592 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8596 g_slist_free (class_inits);
8599 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8600 link_bblock (cfg, cfg->cbb, tblock);
8601 if (sp != stack_start) {
8602 handle_stack_args (cfg, stack_start, sp - stack_start);
8604 CHECK_UNVERIFIABLE (cfg);
8606 cfg->cbb->next_bb = tblock;
8608 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8609 if (cfg->verbose_level > 3)
8610 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8611 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8614 g_slist_free (class_inits);
8619 if (skip_dead_blocks) {
8620 int ip_offset = ip - header->code;
8622 if (ip_offset == bb->end)
8626 int op_size = mono_opcode_size (ip, end);
8627 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8629 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8631 if (ip_offset + op_size == bb->end) {
8632 MONO_INST_NEW (cfg, ins, OP_NOP);
8633 MONO_ADD_INS (cfg->cbb, ins);
8634 start_new_bblock = 1;
8642 * Sequence points are points where the debugger can place a breakpoint.
8643 * Currently, we generate these automatically at points where the IL
8646 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8648 * Make methods interruptible at the beginning, and at the targets of
8649 * backward branches.
8650 * Also, do this at the start of every bblock in methods with clauses too,
8651 * to be able to handle instructions with imprecise control flow like
8653 * Backward branches are handled at the end of method-to-ir ().
8655 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8656 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8658 /* Avoid sequence points on empty IL like .volatile */
8659 // FIXME: Enable this
8660 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8661 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8662 if ((sp != stack_start) && !sym_seq_point)
8663 ins->flags |= MONO_INST_NONEMPTY_STACK;
8664 MONO_ADD_INS (cfg->cbb, ins);
8667 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8670 cfg->cbb->real_offset = cfg->real_offset;
8672 if ((cfg->method == method) && cfg->coverage_info) {
8673 guint32 cil_offset = ip - header->code;
8674 cfg->coverage_info->data [cil_offset].cil_code = ip;
8676 /* TODO: Use an increment here */
8677 #if defined(TARGET_X86)
8678 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8679 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8681 MONO_ADD_INS (cfg->cbb, ins);
8683 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8688 if (cfg->verbose_level > 3)
8689 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8693 if (seq_points && !sym_seq_points && sp != stack_start) {
8695 * The C# compiler uses these nops to notify the JIT that it should
8696 * insert seq points.
8698 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8699 MONO_ADD_INS (cfg->cbb, ins);
8701 if (cfg->keep_cil_nops)
8702 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8704 MONO_INST_NEW (cfg, ins, OP_NOP);
8706 MONO_ADD_INS (cfg->cbb, ins);
8709 if (should_insert_brekpoint (cfg->method)) {
8710 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8712 MONO_INST_NEW (cfg, ins, OP_NOP);
8715 MONO_ADD_INS (cfg->cbb, ins);
8721 CHECK_STACK_OVF (1);
8722 n = (*ip)-CEE_LDARG_0;
8724 EMIT_NEW_ARGLOAD (cfg, ins, n);
8732 CHECK_STACK_OVF (1);
8733 n = (*ip)-CEE_LDLOC_0;
8735 EMIT_NEW_LOCLOAD (cfg, ins, n);
8744 n = (*ip)-CEE_STLOC_0;
8747 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8749 emit_stloc_ir (cfg, sp, header, n);
8756 CHECK_STACK_OVF (1);
8759 EMIT_NEW_ARGLOAD (cfg, ins, n);
8765 CHECK_STACK_OVF (1);
8768 NEW_ARGLOADA (cfg, ins, n);
8769 MONO_ADD_INS (cfg->cbb, ins);
8779 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8781 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8786 CHECK_STACK_OVF (1);
8789 EMIT_NEW_LOCLOAD (cfg, ins, n);
8793 case CEE_LDLOCA_S: {
8794 unsigned char *tmp_ip;
8796 CHECK_STACK_OVF (1);
8797 CHECK_LOCAL (ip [1]);
8799 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8805 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8814 CHECK_LOCAL (ip [1]);
8815 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8817 emit_stloc_ir (cfg, sp, header, ip [1]);
8822 CHECK_STACK_OVF (1);
8823 EMIT_NEW_PCONST (cfg, ins, NULL);
8824 ins->type = STACK_OBJ;
8829 CHECK_STACK_OVF (1);
8830 EMIT_NEW_ICONST (cfg, ins, -1);
8843 CHECK_STACK_OVF (1);
8844 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8850 CHECK_STACK_OVF (1);
8852 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8858 CHECK_STACK_OVF (1);
8859 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8865 CHECK_STACK_OVF (1);
8866 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8867 ins->type = STACK_I8;
8868 ins->dreg = alloc_dreg (cfg, STACK_I8);
8870 ins->inst_l = (gint64)read64 (ip);
8871 MONO_ADD_INS (cfg->cbb, ins);
8877 gboolean use_aotconst = FALSE;
8879 #ifdef TARGET_POWERPC
8880 /* FIXME: Clean this up */
8881 if (cfg->compile_aot)
8882 use_aotconst = TRUE;
8885 /* FIXME: we should really allocate this only late in the compilation process */
8886 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8888 CHECK_STACK_OVF (1);
8894 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8896 dreg = alloc_freg (cfg);
8897 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8898 ins->type = cfg->r4_stack_type;
8900 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8901 ins->type = cfg->r4_stack_type;
8902 ins->dreg = alloc_dreg (cfg, STACK_R8);
8904 MONO_ADD_INS (cfg->cbb, ins);
8914 gboolean use_aotconst = FALSE;
8916 #ifdef TARGET_POWERPC
8917 /* FIXME: Clean this up */
8918 if (cfg->compile_aot)
8919 use_aotconst = TRUE;
8922 /* FIXME: we should really allocate this only late in the compilation process */
8923 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8925 CHECK_STACK_OVF (1);
8931 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8933 dreg = alloc_freg (cfg);
8934 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8935 ins->type = STACK_R8;
8937 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8938 ins->type = STACK_R8;
8939 ins->dreg = alloc_dreg (cfg, STACK_R8);
8941 MONO_ADD_INS (cfg->cbb, ins);
8950 MonoInst *temp, *store;
8952 CHECK_STACK_OVF (1);
8956 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8957 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8959 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8962 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8975 if (sp [0]->type == STACK_R8)
8976 /* we need to pop the value from the x86 FP stack */
8977 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8982 MonoMethodSignature *fsig;
8985 INLINE_FAILURE ("jmp");
8986 GSHAREDVT_FAILURE (*ip);
8989 if (stack_start != sp)
8991 token = read32 (ip + 1);
8992 /* FIXME: check the signature matches */
8993 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8996 if (cfg->gshared && mono_method_check_context_used (cmethod))
8997 GENERIC_SHARING_FAILURE (CEE_JMP);
8999 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9001 fsig = mono_method_signature (cmethod);
9002 n = fsig->param_count + fsig->hasthis;
9003 if (cfg->llvm_only) {
9006 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9007 for (i = 0; i < n; ++i)
9008 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9009 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9011 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9012 * have to emit a normal return since llvm expects it.
9015 emit_setret (cfg, ins);
9016 MONO_INST_NEW (cfg, ins, OP_BR);
9017 ins->inst_target_bb = end_bblock;
9018 MONO_ADD_INS (cfg->cbb, ins);
9019 link_bblock (cfg, cfg->cbb, end_bblock);
9022 } else if (cfg->backend->have_op_tail_call) {
9023 /* Handle tail calls similarly to calls */
9026 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9027 call->method = cmethod;
9028 call->tail_call = TRUE;
9029 call->signature = mono_method_signature (cmethod);
9030 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9031 call->inst.inst_p0 = cmethod;
9032 for (i = 0; i < n; ++i)
9033 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9035 mono_arch_emit_call (cfg, call);
9036 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9037 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9039 for (i = 0; i < num_args; ++i)
9040 /* Prevent arguments from being optimized away */
9041 arg_array [i]->flags |= MONO_INST_VOLATILE;
9043 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9044 ins = (MonoInst*)call;
9045 ins->inst_p0 = cmethod;
9046 MONO_ADD_INS (cfg->cbb, ins);
9050 start_new_bblock = 1;
9055 MonoMethodSignature *fsig;
9058 token = read32 (ip + 1);
9062 //GSHAREDVT_FAILURE (*ip);
9067 fsig = mini_get_signature (method, token, generic_context);
9069 if (method->dynamic && fsig->pinvoke) {
9073 * This is a call through a function pointer using a pinvoke
9074 * signature. Have to create a wrapper and call that instead.
9075 * FIXME: This is very slow, need to create a wrapper at JIT time
9076 * instead based on the signature.
9078 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9079 EMIT_NEW_PCONST (cfg, args [1], fsig);
9081 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9084 n = fsig->param_count + fsig->hasthis;
9088 //g_assert (!virtual_ || fsig->hasthis);
9092 inline_costs += 10 * num_calls++;
9095 * Making generic calls out of gsharedvt methods.
9096 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9097 * patching gshared method addresses into a gsharedvt method.
9099 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9101 * We pass the address to the gsharedvt trampoline in the rgctx reg
9103 MonoInst *callee = addr;
9105 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9107 GSHAREDVT_FAILURE (*ip);
9111 GSHAREDVT_FAILURE (*ip);
9113 addr = emit_get_rgctx_sig (cfg, context_used,
9114 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9115 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9119 /* Prevent inlining of methods with indirect calls */
9120 INLINE_FAILURE ("indirect call");
9122 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9123 MonoJumpInfoType info_type;
9127 * Instead of emitting an indirect call, emit a direct call
9128 * with the contents of the aotconst as the patch info.
9130 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9131 info_type = (MonoJumpInfoType)addr->inst_c1;
9132 info_data = addr->inst_p0;
9134 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9135 info_data = addr->inst_right->inst_left;
9138 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9139 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9144 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9148 /* End of call, INS should contain the result of the call, if any */
9150 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9152 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9155 CHECK_CFG_EXCEPTION;
9159 constrained_class = NULL;
9163 case CEE_CALLVIRT: {
9164 MonoInst *addr = NULL;
9165 MonoMethodSignature *fsig = NULL;
9167 int virtual_ = *ip == CEE_CALLVIRT;
9168 gboolean pass_imt_from_rgctx = FALSE;
9169 MonoInst *imt_arg = NULL;
9170 MonoInst *keep_this_alive = NULL;
9171 gboolean pass_vtable = FALSE;
9172 gboolean pass_mrgctx = FALSE;
9173 MonoInst *vtable_arg = NULL;
9174 gboolean check_this = FALSE;
9175 gboolean supported_tail_call = FALSE;
9176 gboolean tail_call = FALSE;
9177 gboolean need_seq_point = FALSE;
9178 guint32 call_opcode = *ip;
9179 gboolean emit_widen = TRUE;
9180 gboolean push_res = TRUE;
9181 gboolean skip_ret = FALSE;
9182 gboolean delegate_invoke = FALSE;
9183 gboolean direct_icall = FALSE;
9184 gboolean constrained_partial_call = FALSE;
9185 MonoMethod *cil_method;
9188 token = read32 (ip + 1);
9192 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9195 cil_method = cmethod;
9197 if (constrained_class) {
9198 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9199 if (!mini_is_gsharedvt_klass (constrained_class)) {
9200 g_assert (!cmethod->klass->valuetype);
9201 if (!mini_type_is_reference (&constrained_class->byval_arg))
9202 constrained_partial_call = TRUE;
9206 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9207 if (cfg->verbose_level > 2)
9208 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9209 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9210 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9212 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9216 if (cfg->verbose_level > 2)
9217 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9219 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9221 * This is needed since get_method_constrained can't find
9222 * the method in klass representing a type var.
9223 * The type var is guaranteed to be a reference type in this
9226 if (!mini_is_gsharedvt_klass (constrained_class))
9227 g_assert (!cmethod->klass->valuetype);
9229 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9235 if (!cmethod || mono_loader_get_last_error ()) {
9236 if (mono_loader_get_last_error ()) {
9237 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9238 mono_error_set_from_loader_error (&cfg->error);
9244 if (!dont_verify && !cfg->skip_visibility) {
9245 MonoMethod *target_method = cil_method;
9246 if (method->is_inflated) {
9247 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9250 if (!mono_method_can_access_method (method_definition, target_method) &&
9251 !mono_method_can_access_method (method, cil_method))
9252 METHOD_ACCESS_FAILURE (method, cil_method);
9255 if (mono_security_core_clr_enabled ())
9256 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9258 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9259 /* MS.NET seems to silently convert this to a callvirt */
9264 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9265 * converts to a callvirt.
9267 * tests/bug-515884.il is an example of this behavior
9269 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9270 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9271 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9275 if (!cmethod->klass->inited)
9276 if (!mono_class_init (cmethod->klass))
9277 TYPE_LOAD_ERROR (cmethod->klass);
9279 fsig = mono_method_signature (cmethod);
9282 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9283 mini_class_is_system_array (cmethod->klass)) {
9284 array_rank = cmethod->klass->rank;
9285 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9286 direct_icall = TRUE;
9287 } else if (fsig->pinvoke) {
9288 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9289 fsig = mono_method_signature (wrapper);
9290 } else if (constrained_class) {
9292 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9296 if (cfg->llvm_only && !cfg->method->wrapper_type)
9297 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9299 /* See code below */
9300 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9301 MonoBasicBlock *tbb;
9303 GET_BBLOCK (cfg, tbb, ip + 5);
9304 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9306 * We want to extend the try block to cover the call, but we can't do it if the
9307 * call is made directly since it's followed by an exception check.
9309 direct_icall = FALSE;
9313 mono_save_token_info (cfg, image, token, cil_method);
9315 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9316 need_seq_point = TRUE;
9318 /* Don't support calls made using type arguments for now */
9320 if (cfg->gsharedvt) {
9321 if (mini_is_gsharedvt_signature (fsig))
9322 GSHAREDVT_FAILURE (*ip);
9326 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9327 g_assert_not_reached ();
9329 n = fsig->param_count + fsig->hasthis;
9331 if (!cfg->gshared && cmethod->klass->generic_container)
9335 g_assert (!mono_method_check_context_used (cmethod));
9339 //g_assert (!virtual_ || fsig->hasthis);
9344 * We have the `constrained.' prefix opcode.
9346 if (constrained_class) {
9347 if (mini_is_gsharedvt_klass (constrained_class)) {
9348 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9349 /* The 'Own method' case below */
9350 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9351 /* 'The type parameter is instantiated as a reference type' case below. */
9353 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9354 CHECK_CFG_EXCEPTION;
9360 if (constrained_partial_call) {
9361 gboolean need_box = TRUE;
9364 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9365 * called method is not known at compile time either. The called method could end up being
9366 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9367 * to box the receiver.
9368 * A simple solution would be to box always and make a normal virtual call, but that would
9369 * be bad performance wise.
9371 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9373 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9378 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9379 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9381 ins->klass = constrained_class;
9382 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9383 CHECK_CFG_EXCEPTION;
9384 } else if (need_box) {
9386 MonoBasicBlock *is_ref_bb, *end_bb;
9387 MonoInst *nonbox_call;
9390 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9392 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9393 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9395 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9397 NEW_BBLOCK (cfg, is_ref_bb);
9398 NEW_BBLOCK (cfg, end_bb);
9400 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9405 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9407 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9410 MONO_START_BB (cfg, is_ref_bb);
9411 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9412 ins->klass = constrained_class;
9413 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9414 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9416 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9418 MONO_START_BB (cfg, end_bb);
9421 nonbox_call->dreg = ins->dreg;
9424 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9425 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9426 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9429 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9431 * The type parameter is instantiated as a valuetype,
9432 * but that type doesn't override the method we're
9433 * calling, so we need to box `this'.
9435 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9436 ins->klass = constrained_class;
9437 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9438 CHECK_CFG_EXCEPTION;
9439 } else if (!constrained_class->valuetype) {
9440 int dreg = alloc_ireg_ref (cfg);
9443 * The type parameter is instantiated as a reference
9444 * type. We have a managed pointer on the stack, so
9445 * we need to dereference it here.
9447 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9448 ins->type = STACK_OBJ;
9451 if (cmethod->klass->valuetype) {
9454 /* Interface method */
9457 mono_class_setup_vtable (constrained_class);
9458 CHECK_TYPELOAD (constrained_class);
9459 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9461 TYPE_LOAD_ERROR (constrained_class);
9462 slot = mono_method_get_vtable_slot (cmethod);
9464 TYPE_LOAD_ERROR (cmethod->klass);
9465 cmethod = constrained_class->vtable [ioffset + slot];
9467 if (cmethod->klass == mono_defaults.enum_class) {
9468 /* Enum implements some interfaces, so treat this as the first case */
9469 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9470 ins->klass = constrained_class;
9471 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9472 CHECK_CFG_EXCEPTION;
9477 constrained_class = NULL;
9480 if (check_call_signature (cfg, fsig, sp))
9483 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9484 delegate_invoke = TRUE;
9486 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9487 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9488 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9496 * If the callee is a shared method, then its static cctor
9497 * might not get called after the call was patched.
9499 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9500 emit_class_init (cfg, cmethod->klass);
9501 CHECK_TYPELOAD (cmethod->klass);
9504 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9507 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9509 context_used = mini_method_check_context_used (cfg, cmethod);
9511 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9512 /* Generic method interface
9513 calls are resolved via a
9514 helper function and don't
9516 if (!cmethod_context || !cmethod_context->method_inst)
9517 pass_imt_from_rgctx = TRUE;
9521 * If a shared method calls another
9522 * shared method then the caller must
9523 * have a generic sharing context
9524 * because the magic trampoline
9525 * requires it. FIXME: We shouldn't
9526 * have to force the vtable/mrgctx
9527 * variable here. Instead there
9528 * should be a flag in the cfg to
9529 * request a generic sharing context.
9532 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9533 mono_get_vtable_var (cfg);
9538 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9540 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9542 CHECK_TYPELOAD (cmethod->klass);
9543 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9548 g_assert (!vtable_arg);
9550 if (!cfg->compile_aot) {
9552 * emit_get_rgctx_method () calls mono_class_vtable () so check
9553 * for type load errors before.
9555 mono_class_setup_vtable (cmethod->klass);
9556 CHECK_TYPELOAD (cmethod->klass);
9559 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9561 /* !marshalbyref is needed to properly handle generic methods + remoting */
9562 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9563 MONO_METHOD_IS_FINAL (cmethod)) &&
9564 !mono_class_is_marshalbyref (cmethod->klass)) {
9571 if (pass_imt_from_rgctx) {
9572 g_assert (!pass_vtable);
9574 imt_arg = emit_get_rgctx_method (cfg, context_used,
9575 cmethod, MONO_RGCTX_INFO_METHOD);
9579 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9581 /* Calling virtual generic methods */
9582 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9583 !(MONO_METHOD_IS_FINAL (cmethod) &&
9584 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9585 fsig->generic_param_count &&
9586 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9588 MonoInst *this_temp, *this_arg_temp, *store;
9589 MonoInst *iargs [4];
9591 g_assert (fsig->is_inflated);
9593 /* Prevent inlining of methods that contain indirect calls */
9594 INLINE_FAILURE ("virtual generic call");
9596 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9597 GSHAREDVT_FAILURE (*ip);
9599 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9600 g_assert (!imt_arg);
9602 g_assert (cmethod->is_inflated);
9603 imt_arg = emit_get_rgctx_method (cfg, context_used,
9604 cmethod, MONO_RGCTX_INFO_METHOD);
9605 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9607 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9608 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9609 MONO_ADD_INS (cfg->cbb, store);
9611 /* FIXME: This should be a managed pointer */
9612 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9614 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9615 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9616 cmethod, MONO_RGCTX_INFO_METHOD);
9617 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9618 addr = mono_emit_jit_icall (cfg,
9619 mono_helper_compile_generic_method, iargs);
9621 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9623 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9630 * Implement a workaround for the inherent races involved in locking:
9636 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9637 * try block, the Exit () won't be executed, see:
9638 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9639 * To work around this, we extend such try blocks to include the last x bytes
9640 * of the Monitor.Enter () call.
9642 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9643 MonoBasicBlock *tbb;
9645 GET_BBLOCK (cfg, tbb, ip + 5);
9647 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9648 * from Monitor.Enter like ArgumentNullException.
9650 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9651 /* Mark this bblock as needing to be extended */
9652 tbb->extend_try_block = TRUE;
9656 /* Conversion to a JIT intrinsic */
9657 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9658 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9659 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9666 if ((cfg->opt & MONO_OPT_INLINE) &&
9667 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9668 mono_method_check_inlining (cfg, cmethod)) {
9670 gboolean always = FALSE;
9672 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9673 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9674 /* Prevent inlining of methods that call wrappers */
9675 INLINE_FAILURE ("wrapper call");
9676 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9680 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9682 cfg->real_offset += 5;
9684 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9685 /* *sp is already set by inline_method */
9690 inline_costs += costs;
9696 /* Tail recursion elimination */
9697 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9698 gboolean has_vtargs = FALSE;
9701 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9702 INLINE_FAILURE ("tail call");
9704 /* keep it simple */
9705 for (i = fsig->param_count - 1; i >= 0; i--) {
9706 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9711 for (i = 0; i < n; ++i)
9712 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9713 MONO_INST_NEW (cfg, ins, OP_BR);
9714 MONO_ADD_INS (cfg->cbb, ins);
9715 tblock = start_bblock->out_bb [0];
9716 link_bblock (cfg, cfg->cbb, tblock);
9717 ins->inst_target_bb = tblock;
9718 start_new_bblock = 1;
9720 /* skip the CEE_RET, too */
9721 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9728 inline_costs += 10 * num_calls++;
9731 * Making generic calls out of gsharedvt methods.
9732 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9733 * patching gshared method addresses into a gsharedvt method.
9735 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9736 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9737 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9738 MonoRgctxInfoType info_type;
9741 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9742 //GSHAREDVT_FAILURE (*ip);
9743 // disable for possible remoting calls
9744 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9745 GSHAREDVT_FAILURE (*ip);
9746 if (fsig->generic_param_count) {
9747 /* virtual generic call */
9748 g_assert (!imt_arg);
9749 /* Same as the virtual generic case above */
9750 imt_arg = emit_get_rgctx_method (cfg, context_used,
9751 cmethod, MONO_RGCTX_INFO_METHOD);
9752 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9754 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9755 /* This can happen when we call a fully instantiated iface method */
9756 imt_arg = emit_get_rgctx_method (cfg, context_used,
9757 cmethod, MONO_RGCTX_INFO_METHOD);
9762 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9763 keep_this_alive = sp [0];
9765 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9766 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9768 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9769 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9771 if (cfg->llvm_only) {
9772 // FIXME: Avoid initializing vtable_arg
9773 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9775 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9780 /* Generic sharing */
9783 * Use this if the callee is gsharedvt sharable too, since
9784 * at runtime we might find an instantiation so the call cannot
9785 * be patched (the 'no_patch' code path in mini-trampolines.c).
9787 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9788 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9789 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9790 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9791 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9792 INLINE_FAILURE ("gshared");
9794 g_assert (cfg->gshared && cmethod);
9798 * We are compiling a call to a
9799 * generic method from shared code,
9800 * which means that we have to look up
9801 * the method in the rgctx and do an
9805 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9807 if (cfg->llvm_only) {
9808 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9809 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9811 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9812 // FIXME: Avoid initializing imt_arg/vtable_arg
9813 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9815 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9816 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9821 /* Direct calls to icalls */
9823 MonoMethod *wrapper;
9826 /* Inline the wrapper */
9827 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9829 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9830 g_assert (costs > 0);
9831 cfg->real_offset += 5;
9833 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9834 /* *sp is already set by inline_method */
9839 inline_costs += costs;
9848 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9849 MonoInst *val = sp [fsig->param_count];
9851 if (val->type == STACK_OBJ) {
9852 MonoInst *iargs [2];
9857 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9860 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9861 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9862 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9863 emit_write_barrier (cfg, addr, val);
9864 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9865 GSHAREDVT_FAILURE (*ip);
9866 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9867 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9869 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9870 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9871 if (!cmethod->klass->element_class->valuetype && !readonly)
9872 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9873 CHECK_TYPELOAD (cmethod->klass);
9876 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9879 g_assert_not_reached ();
9886 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9890 /* Tail prefix / tail call optimization */
9892 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9893 /* FIXME: runtime generic context pointer for jumps? */
9894 /* FIXME: handle this for generic sharing eventually */
9895 if ((ins_flag & MONO_INST_TAILCALL) &&
9896 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9897 supported_tail_call = TRUE;
9899 if (supported_tail_call) {
9902 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9903 INLINE_FAILURE ("tail call");
9905 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9907 if (cfg->backend->have_op_tail_call) {
9908 /* Handle tail calls similarly to normal calls */
9911 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9913 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9914 call->tail_call = TRUE;
9915 call->method = cmethod;
9916 call->signature = mono_method_signature (cmethod);
9919 * We implement tail calls by storing the actual arguments into the
9920 * argument variables, then emitting a CEE_JMP.
9922 for (i = 0; i < n; ++i) {
9923 /* Prevent argument from being register allocated */
9924 arg_array [i]->flags |= MONO_INST_VOLATILE;
9925 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9927 ins = (MonoInst*)call;
9928 ins->inst_p0 = cmethod;
9929 ins->inst_p1 = arg_array [0];
9930 MONO_ADD_INS (cfg->cbb, ins);
9931 link_bblock (cfg, cfg->cbb, end_bblock);
9932 start_new_bblock = 1;
9934 // FIXME: Eliminate unreachable epilogs
9937 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9938 * only reachable from this call.
9940 GET_BBLOCK (cfg, tblock, ip + 5);
9941 if (tblock == cfg->cbb || tblock->in_count == 0)
9950 * Synchronized wrappers.
9951 * It's hard to determine where to replace a method with its synchronized
9952 * wrapper without causing an infinite recursion. The current solution is
9953 * to add the synchronized wrapper in the trampolines, and to
9954 * change the called method to a dummy wrapper, and resolve that wrapper
9955 * to the real method in mono_jit_compile_method ().
9957 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9958 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9959 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9960 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9964 * Virtual calls in llvm-only mode.
9966 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9967 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9972 INLINE_FAILURE ("call");
9973 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9974 imt_arg, vtable_arg);
9976 if (tail_call && !cfg->llvm_only) {
9977 link_bblock (cfg, cfg->cbb, end_bblock);
9978 start_new_bblock = 1;
9980 // FIXME: Eliminate unreachable epilogs
9983 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9984 * only reachable from this call.
9986 GET_BBLOCK (cfg, tblock, ip + 5);
9987 if (tblock == cfg->cbb || tblock->in_count == 0)
9994 /* End of call, INS should contain the result of the call, if any */
9996 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9999 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10004 if (keep_this_alive) {
10005 MonoInst *dummy_use;
10007 /* See mono_emit_method_call_full () */
10008 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10011 CHECK_CFG_EXCEPTION;
10015 g_assert (*ip == CEE_RET);
10019 constrained_class = NULL;
10020 if (need_seq_point)
10021 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10025 if (cfg->method != method) {
10026 /* return from inlined method */
10028 * If in_count == 0, that means the ret is unreachable due to
10029 * being preceded by a throw. In that case, inline_method () will
10030 * handle setting the return value
10031 * (test case: test_0_inline_throw ()).
10033 if (return_var && cfg->cbb->in_count) {
10034 MonoType *ret_type = mono_method_signature (method)->ret;
10040 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10043 //g_assert (returnvar != -1);
10044 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10045 cfg->ret_var_set = TRUE;
10048 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10050 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10051 emit_pop_lmf (cfg);
10054 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10056 if (seq_points && !sym_seq_points) {
10058 * Place a seq point here too even though the IL stack is not
10059 * empty, so a step over on
10062 * will work correctly.
10064 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10065 MONO_ADD_INS (cfg->cbb, ins);
10068 g_assert (!return_var);
10072 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10075 emit_setret (cfg, *sp);
10078 if (sp != stack_start)
10080 MONO_INST_NEW (cfg, ins, OP_BR);
10082 ins->inst_target_bb = end_bblock;
10083 MONO_ADD_INS (cfg->cbb, ins);
10084 link_bblock (cfg, cfg->cbb, end_bblock);
10085 start_new_bblock = 1;
10089 MONO_INST_NEW (cfg, ins, OP_BR);
10091 target = ip + 1 + (signed char)(*ip);
10093 GET_BBLOCK (cfg, tblock, target);
10094 link_bblock (cfg, cfg->cbb, tblock);
10095 ins->inst_target_bb = tblock;
10096 if (sp != stack_start) {
10097 handle_stack_args (cfg, stack_start, sp - stack_start);
10099 CHECK_UNVERIFIABLE (cfg);
10101 MONO_ADD_INS (cfg->cbb, ins);
10102 start_new_bblock = 1;
10103 inline_costs += BRANCH_COST;
10117 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10119 target = ip + 1 + *(signed char*)ip;
10122 ADD_BINCOND (NULL);
10125 inline_costs += BRANCH_COST;
10129 MONO_INST_NEW (cfg, ins, OP_BR);
10132 target = ip + 4 + (gint32)read32(ip);
10134 GET_BBLOCK (cfg, tblock, target);
10135 link_bblock (cfg, cfg->cbb, tblock);
10136 ins->inst_target_bb = tblock;
10137 if (sp != stack_start) {
10138 handle_stack_args (cfg, stack_start, sp - stack_start);
10140 CHECK_UNVERIFIABLE (cfg);
10143 MONO_ADD_INS (cfg->cbb, ins);
10145 start_new_bblock = 1;
10146 inline_costs += BRANCH_COST;
10148 case CEE_BRFALSE_S:
10153 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10154 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10155 guint32 opsize = is_short ? 1 : 4;
10157 CHECK_OPSIZE (opsize);
10159 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10162 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10167 GET_BBLOCK (cfg, tblock, target);
10168 link_bblock (cfg, cfg->cbb, tblock);
10169 GET_BBLOCK (cfg, tblock, ip);
10170 link_bblock (cfg, cfg->cbb, tblock);
10172 if (sp != stack_start) {
10173 handle_stack_args (cfg, stack_start, sp - stack_start);
10174 CHECK_UNVERIFIABLE (cfg);
10177 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10178 cmp->sreg1 = sp [0]->dreg;
10179 type_from_op (cfg, cmp, sp [0], NULL);
10182 #if SIZEOF_REGISTER == 4
10183 if (cmp->opcode == OP_LCOMPARE_IMM) {
10184 /* Convert it to OP_LCOMPARE */
10185 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10186 ins->type = STACK_I8;
10187 ins->dreg = alloc_dreg (cfg, STACK_I8);
10189 MONO_ADD_INS (cfg->cbb, ins);
10190 cmp->opcode = OP_LCOMPARE;
10191 cmp->sreg2 = ins->dreg;
10194 MONO_ADD_INS (cfg->cbb, cmp);
10196 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10197 type_from_op (cfg, ins, sp [0], NULL);
10198 MONO_ADD_INS (cfg->cbb, ins);
10199 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10200 GET_BBLOCK (cfg, tblock, target);
10201 ins->inst_true_bb = tblock;
10202 GET_BBLOCK (cfg, tblock, ip);
10203 ins->inst_false_bb = tblock;
10204 start_new_bblock = 2;
10207 inline_costs += BRANCH_COST;
10222 MONO_INST_NEW (cfg, ins, *ip);
10224 target = ip + 4 + (gint32)read32(ip);
10227 ADD_BINCOND (NULL);
10230 inline_costs += BRANCH_COST;
10234 MonoBasicBlock **targets;
10235 MonoBasicBlock *default_bblock;
10236 MonoJumpInfoBBTable *table;
10237 int offset_reg = alloc_preg (cfg);
10238 int target_reg = alloc_preg (cfg);
10239 int table_reg = alloc_preg (cfg);
10240 int sum_reg = alloc_preg (cfg);
10241 gboolean use_op_switch;
10245 n = read32 (ip + 1);
10248 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10252 CHECK_OPSIZE (n * sizeof (guint32));
10253 target = ip + n * sizeof (guint32);
10255 GET_BBLOCK (cfg, default_bblock, target);
10256 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10258 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10259 for (i = 0; i < n; ++i) {
10260 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10261 targets [i] = tblock;
10262 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10266 if (sp != stack_start) {
10268 * Link the current bb with the targets as well, so handle_stack_args
10269 * will set their in_stack correctly.
10271 link_bblock (cfg, cfg->cbb, default_bblock);
10272 for (i = 0; i < n; ++i)
10273 link_bblock (cfg, cfg->cbb, targets [i]);
10275 handle_stack_args (cfg, stack_start, sp - stack_start);
10277 CHECK_UNVERIFIABLE (cfg);
10279 /* Undo the links */
10280 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10281 for (i = 0; i < n; ++i)
10282 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10288 for (i = 0; i < n; ++i)
10289 link_bblock (cfg, cfg->cbb, targets [i]);
10291 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10292 table->table = targets;
10293 table->table_size = n;
10295 use_op_switch = FALSE;
10297 /* ARM implements SWITCH statements differently */
10298 /* FIXME: Make it use the generic implementation */
10299 if (!cfg->compile_aot)
10300 use_op_switch = TRUE;
10303 if (COMPILE_LLVM (cfg))
10304 use_op_switch = TRUE;
10306 cfg->cbb->has_jump_table = 1;
10308 if (use_op_switch) {
10309 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10310 ins->sreg1 = src1->dreg;
10311 ins->inst_p0 = table;
10312 ins->inst_many_bb = targets;
10313 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10314 MONO_ADD_INS (cfg->cbb, ins);
10316 if (sizeof (gpointer) == 8)
10317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10321 #if SIZEOF_REGISTER == 8
10322 /* The upper word might not be zero, and we add it to a 64 bit address later */
10323 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10326 if (cfg->compile_aot) {
10327 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10329 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10330 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10331 ins->inst_p0 = table;
10332 ins->dreg = table_reg;
10333 MONO_ADD_INS (cfg->cbb, ins);
10336 /* FIXME: Use load_memindex */
10337 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10339 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10341 start_new_bblock = 1;
10342 inline_costs += (BRANCH_COST * 2);
10355 case CEE_LDIND_REF:
10362 dreg = alloc_freg (cfg);
10365 dreg = alloc_lreg (cfg);
10367 case CEE_LDIND_REF:
10368 dreg = alloc_ireg_ref (cfg);
10371 dreg = alloc_preg (cfg);
10374 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10375 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10376 if (*ip == CEE_LDIND_R4)
10377 ins->type = cfg->r4_stack_type;
10378 ins->flags |= ins_flag;
10379 MONO_ADD_INS (cfg->cbb, ins);
10381 if (ins_flag & MONO_INST_VOLATILE) {
10382 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10383 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10388 case CEE_STIND_REF:
10399 if (ins_flag & MONO_INST_VOLATILE) {
10400 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10401 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10404 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10405 ins->flags |= ins_flag;
10408 MONO_ADD_INS (cfg->cbb, ins);
10410 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10411 emit_write_barrier (cfg, sp [0], sp [1]);
10420 MONO_INST_NEW (cfg, ins, (*ip));
10422 ins->sreg1 = sp [0]->dreg;
10423 ins->sreg2 = sp [1]->dreg;
10424 type_from_op (cfg, ins, sp [0], sp [1]);
10426 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10428 /* Use the immediate opcodes if possible */
10429 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10430 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10431 if (imm_opcode != -1) {
10432 ins->opcode = imm_opcode;
10433 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10436 NULLIFY_INS (sp [1]);
10440 MONO_ADD_INS ((cfg)->cbb, (ins));
10442 *sp++ = mono_decompose_opcode (cfg, ins);
10459 MONO_INST_NEW (cfg, ins, (*ip));
10461 ins->sreg1 = sp [0]->dreg;
10462 ins->sreg2 = sp [1]->dreg;
10463 type_from_op (cfg, ins, sp [0], sp [1]);
10465 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10466 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10468 /* FIXME: Pass opcode to is_inst_imm */
10470 /* Use the immediate opcodes if possible */
10471 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10472 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10473 if (imm_opcode != -1) {
10474 ins->opcode = imm_opcode;
10475 if (sp [1]->opcode == OP_I8CONST) {
10476 #if SIZEOF_REGISTER == 8
10477 ins->inst_imm = sp [1]->inst_l;
10479 ins->inst_ls_word = sp [1]->inst_ls_word;
10480 ins->inst_ms_word = sp [1]->inst_ms_word;
10484 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10487 /* Might be followed by an instruction added by add_widen_op */
10488 if (sp [1]->next == NULL)
10489 NULLIFY_INS (sp [1]);
10492 MONO_ADD_INS ((cfg)->cbb, (ins));
10494 *sp++ = mono_decompose_opcode (cfg, ins);
10507 case CEE_CONV_OVF_I8:
10508 case CEE_CONV_OVF_U8:
10509 case CEE_CONV_R_UN:
10512 /* Special case this earlier so we have long constants in the IR */
10513 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10514 int data = sp [-1]->inst_c0;
10515 sp [-1]->opcode = OP_I8CONST;
10516 sp [-1]->type = STACK_I8;
10517 #if SIZEOF_REGISTER == 8
10518 if ((*ip) == CEE_CONV_U8)
10519 sp [-1]->inst_c0 = (guint32)data;
10521 sp [-1]->inst_c0 = data;
10523 sp [-1]->inst_ls_word = data;
10524 if ((*ip) == CEE_CONV_U8)
10525 sp [-1]->inst_ms_word = 0;
10527 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10529 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10536 case CEE_CONV_OVF_I4:
10537 case CEE_CONV_OVF_I1:
10538 case CEE_CONV_OVF_I2:
10539 case CEE_CONV_OVF_I:
10540 case CEE_CONV_OVF_U:
10543 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10544 ADD_UNOP (CEE_CONV_OVF_I8);
10551 case CEE_CONV_OVF_U1:
10552 case CEE_CONV_OVF_U2:
10553 case CEE_CONV_OVF_U4:
10556 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10557 ADD_UNOP (CEE_CONV_OVF_U8);
10564 case CEE_CONV_OVF_I1_UN:
10565 case CEE_CONV_OVF_I2_UN:
10566 case CEE_CONV_OVF_I4_UN:
10567 case CEE_CONV_OVF_I8_UN:
10568 case CEE_CONV_OVF_U1_UN:
10569 case CEE_CONV_OVF_U2_UN:
10570 case CEE_CONV_OVF_U4_UN:
10571 case CEE_CONV_OVF_U8_UN:
10572 case CEE_CONV_OVF_I_UN:
10573 case CEE_CONV_OVF_U_UN:
10580 CHECK_CFG_EXCEPTION;
10584 case CEE_ADD_OVF_UN:
10586 case CEE_MUL_OVF_UN:
10588 case CEE_SUB_OVF_UN:
10594 GSHAREDVT_FAILURE (*ip);
10597 token = read32 (ip + 1);
10598 klass = mini_get_class (method, token, generic_context);
10599 CHECK_TYPELOAD (klass);
10601 if (generic_class_is_reference_type (cfg, klass)) {
10602 MonoInst *store, *load;
10603 int dreg = alloc_ireg_ref (cfg);
10605 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10606 load->flags |= ins_flag;
10607 MONO_ADD_INS (cfg->cbb, load);
10609 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10610 store->flags |= ins_flag;
10611 MONO_ADD_INS (cfg->cbb, store);
10613 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10614 emit_write_barrier (cfg, sp [0], sp [1]);
10616 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10622 int loc_index = -1;
10628 token = read32 (ip + 1);
10629 klass = mini_get_class (method, token, generic_context);
10630 CHECK_TYPELOAD (klass);
10632 /* Optimize the common ldobj+stloc combination */
10635 loc_index = ip [6];
10642 loc_index = ip [5] - CEE_STLOC_0;
10649 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10650 CHECK_LOCAL (loc_index);
10652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10653 ins->dreg = cfg->locals [loc_index]->dreg;
10654 ins->flags |= ins_flag;
10657 if (ins_flag & MONO_INST_VOLATILE) {
10658 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10659 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10665 /* Optimize the ldobj+stobj combination */
10666 /* The reference case ends up being a load+store anyway */
10667 /* Skip this if the operation is volatile. */
10668 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10673 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10681 ins->flags |= ins_flag;
10684 if (ins_flag & MONO_INST_VOLATILE) {
10685 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10686 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10695 CHECK_STACK_OVF (1);
10697 n = read32 (ip + 1);
10699 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10700 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10701 ins->type = STACK_OBJ;
10704 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10705 MonoInst *iargs [1];
10706 char *str = (char *)mono_method_get_wrapper_data (method, n);
10708 if (cfg->compile_aot)
10709 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10711 EMIT_NEW_PCONST (cfg, iargs [0], str);
10712 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10714 if (cfg->opt & MONO_OPT_SHARED) {
10715 MonoInst *iargs [3];
10717 if (cfg->compile_aot) {
10718 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10720 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10721 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10722 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10723 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10724 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10726 if (cfg->cbb->out_of_line) {
10727 MonoInst *iargs [2];
10729 if (image == mono_defaults.corlib) {
10731 * Avoid relocations in AOT and save some space by using a
10732 * version of helper_ldstr specialized to mscorlib.
10734 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10735 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10737 /* Avoid creating the string object */
10738 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10739 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10740 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10744 if (cfg->compile_aot) {
10745 NEW_LDSTRCONST (cfg, ins, image, n);
10747 MONO_ADD_INS (cfg->cbb, ins);
10750 NEW_PCONST (cfg, ins, NULL);
10751 ins->type = STACK_OBJ;
10752 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10754 OUT_OF_MEMORY_FAILURE;
10757 MONO_ADD_INS (cfg->cbb, ins);
10766 MonoInst *iargs [2];
10767 MonoMethodSignature *fsig;
10770 MonoInst *vtable_arg = NULL;
10773 token = read32 (ip + 1);
10774 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10777 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10780 mono_save_token_info (cfg, image, token, cmethod);
10782 if (!mono_class_init (cmethod->klass))
10783 TYPE_LOAD_ERROR (cmethod->klass);
10785 context_used = mini_method_check_context_used (cfg, cmethod);
10787 if (mono_security_core_clr_enabled ())
10788 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10790 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10791 emit_class_init (cfg, cmethod->klass);
10792 CHECK_TYPELOAD (cmethod->klass);
10796 if (cfg->gsharedvt) {
10797 if (mini_is_gsharedvt_variable_signature (sig))
10798 GSHAREDVT_FAILURE (*ip);
10802 n = fsig->param_count;
10806 * Generate smaller code for the common newobj <exception> instruction in
10807 * argument checking code.
10809 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10810 is_exception_class (cmethod->klass) && n <= 2 &&
10811 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10812 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10813 MonoInst *iargs [3];
10817 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10820 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10823 iargs [1] = sp [0];
10824 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10827 iargs [1] = sp [0];
10828 iargs [2] = sp [1];
10829 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10832 g_assert_not_reached ();
10840 /* move the args to allow room for 'this' in the first position */
10846 /* check_call_signature () requires sp[0] to be set */
10847 this_ins.type = STACK_OBJ;
10848 sp [0] = &this_ins;
10849 if (check_call_signature (cfg, fsig, sp))
10854 if (mini_class_is_system_array (cmethod->klass)) {
10855 *sp = emit_get_rgctx_method (cfg, context_used,
10856 cmethod, MONO_RGCTX_INFO_METHOD);
10858 /* Avoid varargs in the common case */
10859 if (fsig->param_count == 1)
10860 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10861 else if (fsig->param_count == 2)
10862 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10863 else if (fsig->param_count == 3)
10864 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10865 else if (fsig->param_count == 4)
10866 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10868 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10869 } else if (cmethod->string_ctor) {
10870 g_assert (!context_used);
10871 g_assert (!vtable_arg);
10872 /* we simply pass a null pointer */
10873 EMIT_NEW_PCONST (cfg, *sp, NULL);
10874 /* now call the string ctor */
10875 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10877 if (cmethod->klass->valuetype) {
10878 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10879 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10880 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10885 * The code generated by mini_emit_virtual_call () expects
10886 * iargs [0] to be a boxed instance, but luckily the vcall
10887 * will be transformed into a normal call there.
10889 } else if (context_used) {
10890 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10893 MonoVTable *vtable = NULL;
10895 if (!cfg->compile_aot)
10896 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10897 CHECK_TYPELOAD (cmethod->klass);
10900 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10901 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10902 * As a workaround, we call class cctors before allocating objects.
10904 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10905 emit_class_init (cfg, cmethod->klass);
10906 if (cfg->verbose_level > 2)
10907 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10908 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10911 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10914 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10917 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10919 /* Now call the actual ctor */
10920 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10921 CHECK_CFG_EXCEPTION;
10924 if (alloc == NULL) {
10926 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10927 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10935 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10936 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10939 case CEE_CASTCLASS:
10943 token = read32 (ip + 1);
10944 klass = mini_get_class (method, token, generic_context);
10945 CHECK_TYPELOAD (klass);
10946 if (sp [0]->type != STACK_OBJ)
10949 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10950 CHECK_CFG_EXCEPTION;
10959 token = read32 (ip + 1);
10960 klass = mini_get_class (method, token, generic_context);
10961 CHECK_TYPELOAD (klass);
10962 if (sp [0]->type != STACK_OBJ)
10965 context_used = mini_class_check_context_used (cfg, klass);
10967 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10968 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10969 MonoInst *args [3];
10976 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10979 idx = get_castclass_cache_idx (cfg);
10980 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10982 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10985 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10986 MonoMethod *mono_isinst;
10987 MonoInst *iargs [1];
10990 mono_isinst = mono_marshal_get_isinst (klass);
10991 iargs [0] = sp [0];
10993 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10994 iargs, ip, cfg->real_offset, TRUE);
10995 CHECK_CFG_EXCEPTION;
10996 g_assert (costs > 0);
10999 cfg->real_offset += 5;
11003 inline_costs += costs;
11006 ins = handle_isinst (cfg, klass, *sp, context_used);
11007 CHECK_CFG_EXCEPTION;
11013 case CEE_UNBOX_ANY: {
11014 MonoInst *res, *addr;
11019 token = read32 (ip + 1);
11020 klass = mini_get_class (method, token, generic_context);
11021 CHECK_TYPELOAD (klass);
11023 mono_save_token_info (cfg, image, token, klass);
11025 context_used = mini_class_check_context_used (cfg, klass);
11027 if (mini_is_gsharedvt_klass (klass)) {
11028 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11030 } else if (generic_class_is_reference_type (cfg, klass)) {
11031 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11032 CHECK_CFG_EXCEPTION;
11033 } else if (mono_class_is_nullable (klass)) {
11034 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11036 addr = handle_unbox (cfg, klass, sp, context_used);
11038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11049 MonoClass *enum_class;
11050 MonoMethod *has_flag;
11056 token = read32 (ip + 1);
11057 klass = mini_get_class (method, token, generic_context);
11058 CHECK_TYPELOAD (klass);
11060 mono_save_token_info (cfg, image, token, klass);
11062 context_used = mini_class_check_context_used (cfg, klass);
11064 if (generic_class_is_reference_type (cfg, klass)) {
11070 if (klass == mono_defaults.void_class)
11072 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11074 /* frequent check in generic code: box (struct), brtrue */
11079 * <push int/long ptr>
11082 * constrained. MyFlags
11083 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11085 * If we find this sequence and the operand types on box and constrained
11086 * are equal, we can emit a specialized instruction sequence instead of
11087 * the very slow HasFlag () call.
11089 if ((cfg->opt & MONO_OPT_INTRINS) &&
11090 /* Cheap checks first. */
11091 ip + 5 + 6 + 5 < end &&
11092 ip [5] == CEE_PREFIX1 &&
11093 ip [6] == CEE_CONSTRAINED_ &&
11094 ip [11] == CEE_CALLVIRT &&
11095 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11096 mono_class_is_enum (klass) &&
11097 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11098 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11099 has_flag->klass == mono_defaults.enum_class &&
11100 !strcmp (has_flag->name, "HasFlag") &&
11101 has_flag->signature->hasthis &&
11102 has_flag->signature->param_count == 1) {
11103 CHECK_TYPELOAD (enum_class);
11105 if (enum_class == klass) {
11106 MonoInst *enum_this, *enum_flag;
11111 enum_this = sp [0];
11112 enum_flag = sp [1];
11114 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11119 // FIXME: LLVM can't handle the inconsistent bb linking
11120 if (!mono_class_is_nullable (klass) &&
11121 !mini_is_gsharedvt_klass (klass) &&
11122 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11123 (ip [5] == CEE_BRTRUE ||
11124 ip [5] == CEE_BRTRUE_S ||
11125 ip [5] == CEE_BRFALSE ||
11126 ip [5] == CEE_BRFALSE_S)) {
11127 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11129 MonoBasicBlock *true_bb, *false_bb;
11133 if (cfg->verbose_level > 3) {
11134 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11135 printf ("<box+brtrue opt>\n");
11140 case CEE_BRFALSE_S:
11143 target = ip + 1 + (signed char)(*ip);
11150 target = ip + 4 + (gint)(read32 (ip));
11154 g_assert_not_reached ();
11158 * We need to link both bblocks, since it is needed for handling stack
11159 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11160 * Branching to only one of them would lead to inconsistencies, so
11161 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11163 GET_BBLOCK (cfg, true_bb, target);
11164 GET_BBLOCK (cfg, false_bb, ip);
11166 mono_link_bblock (cfg, cfg->cbb, true_bb);
11167 mono_link_bblock (cfg, cfg->cbb, false_bb);
11169 if (sp != stack_start) {
11170 handle_stack_args (cfg, stack_start, sp - stack_start);
11172 CHECK_UNVERIFIABLE (cfg);
11175 if (COMPILE_LLVM (cfg)) {
11176 dreg = alloc_ireg (cfg);
11177 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11178 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11180 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11182 /* The JIT can't eliminate the iconst+compare */
11183 MONO_INST_NEW (cfg, ins, OP_BR);
11184 ins->inst_target_bb = is_true ? true_bb : false_bb;
11185 MONO_ADD_INS (cfg->cbb, ins);
11188 start_new_bblock = 1;
11192 *sp++ = handle_box (cfg, val, klass, context_used);
11194 CHECK_CFG_EXCEPTION;
11203 token = read32 (ip + 1);
11204 klass = mini_get_class (method, token, generic_context);
11205 CHECK_TYPELOAD (klass);
11207 mono_save_token_info (cfg, image, token, klass);
11209 context_used = mini_class_check_context_used (cfg, klass);
11211 if (mono_class_is_nullable (klass)) {
11214 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11215 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11219 ins = handle_unbox (cfg, klass, sp, context_used);
11232 MonoClassField *field;
11233 #ifndef DISABLE_REMOTING
11237 gboolean is_instance;
11239 gpointer addr = NULL;
11240 gboolean is_special_static;
11242 MonoInst *store_val = NULL;
11243 MonoInst *thread_ins;
11246 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11248 if (op == CEE_STFLD) {
11251 store_val = sp [1];
11256 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11258 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11261 if (op == CEE_STSFLD) {
11264 store_val = sp [0];
11269 token = read32 (ip + 1);
11270 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11271 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11272 klass = field->parent;
11275 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11278 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11279 FIELD_ACCESS_FAILURE (method, field);
11280 mono_class_init (klass);
11282 /* if the class is Critical then transparent code cannot access its fields */
11283 if (!is_instance && mono_security_core_clr_enabled ())
11284 ensure_method_is_allowed_to_access_field (cfg, method, field);
11286 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11287 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11288 if (mono_security_core_clr_enabled ())
11289 ensure_method_is_allowed_to_access_field (cfg, method, field);
11292 ftype = mono_field_get_type (field);
11295 * LDFLD etc. is usable on static fields as well, so convert those cases to
11298 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11310 g_assert_not_reached ();
11312 is_instance = FALSE;
11315 context_used = mini_class_check_context_used (cfg, klass);
11317 /* INSTANCE CASE */
11319 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11320 if (op == CEE_STFLD) {
11321 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11323 #ifndef DISABLE_REMOTING
11324 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11325 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11326 MonoInst *iargs [5];
11328 GSHAREDVT_FAILURE (op);
11330 iargs [0] = sp [0];
11331 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11332 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11333 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11335 iargs [4] = sp [1];
11337 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11338 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11339 iargs, ip, cfg->real_offset, TRUE);
11340 CHECK_CFG_EXCEPTION;
11341 g_assert (costs > 0);
11343 cfg->real_offset += 5;
11345 inline_costs += costs;
11347 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11354 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11356 if (mini_is_gsharedvt_klass (klass)) {
11357 MonoInst *offset_ins;
11359 context_used = mini_class_check_context_used (cfg, klass);
11361 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11362 /* The value is offset by 1 */
11363 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11364 dreg = alloc_ireg_mp (cfg);
11365 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11366 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11367 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11369 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11371 if (sp [0]->opcode != OP_LDADDR)
11372 store->flags |= MONO_INST_FAULT;
11374 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11375 /* insert call to write barrier */
11379 dreg = alloc_ireg_mp (cfg);
11380 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11381 emit_write_barrier (cfg, ptr, sp [1]);
11384 store->flags |= ins_flag;
11391 #ifndef DISABLE_REMOTING
11392 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11393 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11394 MonoInst *iargs [4];
11396 GSHAREDVT_FAILURE (op);
11398 iargs [0] = sp [0];
11399 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11400 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11401 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11402 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11403 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11404 iargs, ip, cfg->real_offset, TRUE);
11405 CHECK_CFG_EXCEPTION;
11406 g_assert (costs > 0);
11408 cfg->real_offset += 5;
11412 inline_costs += costs;
11414 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11420 if (sp [0]->type == STACK_VTYPE) {
11423 /* Have to compute the address of the variable */
11425 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11427 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11429 g_assert (var->klass == klass);
11431 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11435 if (op == CEE_LDFLDA) {
11436 if (sp [0]->type == STACK_OBJ) {
11437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11438 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11441 dreg = alloc_ireg_mp (cfg);
11443 if (mini_is_gsharedvt_klass (klass)) {
11444 MonoInst *offset_ins;
11446 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11447 /* The value is offset by 1 */
11448 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11449 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11451 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11453 ins->klass = mono_class_from_mono_type (field->type);
11454 ins->type = STACK_MP;
11459 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11461 if (mini_is_gsharedvt_klass (klass)) {
11462 MonoInst *offset_ins;
11464 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11465 /* The value is offset by 1 */
11466 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11467 dreg = alloc_ireg_mp (cfg);
11468 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11469 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11471 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11473 load->flags |= ins_flag;
11474 if (sp [0]->opcode != OP_LDADDR)
11475 load->flags |= MONO_INST_FAULT;
11487 context_used = mini_class_check_context_used (cfg, klass);
11489 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11492 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11493 * to be called here.
11495 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11496 mono_class_vtable (cfg->domain, klass);
11497 CHECK_TYPELOAD (klass);
11499 mono_domain_lock (cfg->domain);
11500 if (cfg->domain->special_static_fields)
11501 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11502 mono_domain_unlock (cfg->domain);
11504 is_special_static = mono_class_field_is_special_static (field);
11506 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11507 thread_ins = mono_get_thread_intrinsic (cfg);
11511 /* Generate IR to compute the field address */
11512 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11514 * Fast access to TLS data
11515 * Inline version of get_thread_static_data () in
11519 int idx, static_data_reg, array_reg, dreg;
11521 GSHAREDVT_FAILURE (op);
11523 MONO_ADD_INS (cfg->cbb, thread_ins);
11524 static_data_reg = alloc_ireg (cfg);
11525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11527 if (cfg->compile_aot) {
11528 int offset_reg, offset2_reg, idx_reg;
11530 /* For TLS variables, this will return the TLS offset */
11531 EMIT_NEW_SFLDACONST (cfg, ins, field);
11532 offset_reg = ins->dreg;
11533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11534 idx_reg = alloc_ireg (cfg);
11535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11537 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11538 array_reg = alloc_ireg (cfg);
11539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11540 offset2_reg = alloc_ireg (cfg);
11541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11543 dreg = alloc_ireg (cfg);
11544 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11546 offset = (gsize)addr & 0x7fffffff;
11547 idx = offset & 0x3f;
11549 array_reg = alloc_ireg (cfg);
11550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11551 dreg = alloc_ireg (cfg);
11552 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11554 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11555 (cfg->compile_aot && is_special_static) ||
11556 (context_used && is_special_static)) {
11557 MonoInst *iargs [2];
11559 g_assert (field->parent);
11560 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11561 if (context_used) {
11562 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11563 field, MONO_RGCTX_INFO_CLASS_FIELD);
11565 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11567 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11568 } else if (context_used) {
11569 MonoInst *static_data;
11572 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11573 method->klass->name_space, method->klass->name, method->name,
11574 depth, field->offset);
11577 if (mono_class_needs_cctor_run (klass, method))
11578 emit_class_init (cfg, klass);
11581 * The pointer we're computing here is
11583 * super_info.static_data + field->offset
11585 static_data = emit_get_rgctx_klass (cfg, context_used,
11586 klass, MONO_RGCTX_INFO_STATIC_DATA);
11588 if (mini_is_gsharedvt_klass (klass)) {
11589 MonoInst *offset_ins;
11591 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11592 /* The value is offset by 1 */
11593 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11594 dreg = alloc_ireg_mp (cfg);
11595 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11596 } else if (field->offset == 0) {
11599 int addr_reg = mono_alloc_preg (cfg);
11600 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11602 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11603 MonoInst *iargs [2];
11605 g_assert (field->parent);
11606 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11607 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11608 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11610 MonoVTable *vtable = NULL;
11612 if (!cfg->compile_aot)
11613 vtable = mono_class_vtable (cfg->domain, klass);
11614 CHECK_TYPELOAD (klass);
11617 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11618 if (!(g_slist_find (class_inits, klass))) {
11619 emit_class_init (cfg, klass);
11620 if (cfg->verbose_level > 2)
11621 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11622 class_inits = g_slist_prepend (class_inits, klass);
11625 if (cfg->run_cctors) {
11627 /* This makes it so that inlining cannot trigger */
11628 /* .cctors: too many apps depend on them */
11629 /* running with a specific order... */
11631 if (! vtable->initialized)
11632 INLINE_FAILURE ("class init");
11633 ex = mono_runtime_class_init_full (vtable, FALSE);
11635 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11636 mono_error_set_exception_instance (&cfg->error, ex);
11637 g_assert_not_reached ();
11638 goto exception_exit;
11642 if (cfg->compile_aot)
11643 EMIT_NEW_SFLDACONST (cfg, ins, field);
11646 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11648 EMIT_NEW_PCONST (cfg, ins, addr);
11651 MonoInst *iargs [1];
11652 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11653 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11657 /* Generate IR to do the actual load/store operation */
11659 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11660 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11661 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11664 if (op == CEE_LDSFLDA) {
11665 ins->klass = mono_class_from_mono_type (ftype);
11666 ins->type = STACK_PTR;
11668 } else if (op == CEE_STSFLD) {
11671 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11672 store->flags |= ins_flag;
11674 gboolean is_const = FALSE;
11675 MonoVTable *vtable = NULL;
11676 gpointer addr = NULL;
11678 if (!context_used) {
11679 vtable = mono_class_vtable (cfg->domain, klass);
11680 CHECK_TYPELOAD (klass);
11682 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11683 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11684 int ro_type = ftype->type;
11686 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11687 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11688 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11691 GSHAREDVT_FAILURE (op);
11693 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11696 case MONO_TYPE_BOOLEAN:
11698 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11702 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11705 case MONO_TYPE_CHAR:
11707 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11711 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11716 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11720 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11725 case MONO_TYPE_PTR:
11726 case MONO_TYPE_FNPTR:
11727 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11728 type_to_eval_stack_type ((cfg), field->type, *sp);
11731 case MONO_TYPE_STRING:
11732 case MONO_TYPE_OBJECT:
11733 case MONO_TYPE_CLASS:
11734 case MONO_TYPE_SZARRAY:
11735 case MONO_TYPE_ARRAY:
11736 if (!mono_gc_is_moving ()) {
11737 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11738 type_to_eval_stack_type ((cfg), field->type, *sp);
11746 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11751 case MONO_TYPE_VALUETYPE:
11761 CHECK_STACK_OVF (1);
11763 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11764 load->flags |= ins_flag;
11770 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11771 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11772 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11783 token = read32 (ip + 1);
11784 klass = mini_get_class (method, token, generic_context);
11785 CHECK_TYPELOAD (klass);
11786 if (ins_flag & MONO_INST_VOLATILE) {
11787 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11788 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11790 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11791 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11792 ins->flags |= ins_flag;
11793 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11794 generic_class_is_reference_type (cfg, klass)) {
11795 /* insert call to write barrier */
11796 emit_write_barrier (cfg, sp [0], sp [1]);
11808 const char *data_ptr;
11810 guint32 field_token;
11816 token = read32 (ip + 1);
11818 klass = mini_get_class (method, token, generic_context);
11819 CHECK_TYPELOAD (klass);
11821 context_used = mini_class_check_context_used (cfg, klass);
11823 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11824 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11825 ins->sreg1 = sp [0]->dreg;
11826 ins->type = STACK_I4;
11827 ins->dreg = alloc_ireg (cfg);
11828 MONO_ADD_INS (cfg->cbb, ins);
11829 *sp = mono_decompose_opcode (cfg, ins);
11832 if (context_used) {
11833 MonoInst *args [3];
11834 MonoClass *array_class = mono_array_class_get (klass, 1);
11835 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11837 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11840 args [0] = emit_get_rgctx_klass (cfg, context_used,
11841 array_class, MONO_RGCTX_INFO_VTABLE);
11846 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11848 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11850 if (cfg->opt & MONO_OPT_SHARED) {
11851 /* Decompose now to avoid problems with references to the domainvar */
11852 MonoInst *iargs [3];
11854 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11855 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11856 iargs [2] = sp [0];
11858 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11860 /* Decompose later since it is needed by abcrem */
11861 MonoClass *array_type = mono_array_class_get (klass, 1);
11862 mono_class_vtable (cfg->domain, array_type);
11863 CHECK_TYPELOAD (array_type);
11865 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11866 ins->dreg = alloc_ireg_ref (cfg);
11867 ins->sreg1 = sp [0]->dreg;
11868 ins->inst_newa_class = klass;
11869 ins->type = STACK_OBJ;
11870 ins->klass = array_type;
11871 MONO_ADD_INS (cfg->cbb, ins);
11872 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11873 cfg->cbb->has_array_access = TRUE;
11875 /* Needed so mono_emit_load_get_addr () gets called */
11876 mono_get_got_var (cfg);
11886 * we inline/optimize the initialization sequence if possible.
11887 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11888 * for small sizes open code the memcpy
11889 * ensure the rva field is big enough
11891 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11892 MonoMethod *memcpy_method = get_memcpy_method ();
11893 MonoInst *iargs [3];
11894 int add_reg = alloc_ireg_mp (cfg);
11896 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11897 if (cfg->compile_aot) {
11898 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11900 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11902 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11903 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11912 if (sp [0]->type != STACK_OBJ)
11915 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11916 ins->dreg = alloc_preg (cfg);
11917 ins->sreg1 = sp [0]->dreg;
11918 ins->type = STACK_I4;
11919 /* This flag will be inherited by the decomposition */
11920 ins->flags |= MONO_INST_FAULT;
11921 MONO_ADD_INS (cfg->cbb, ins);
11922 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11923 cfg->cbb->has_array_access = TRUE;
11931 if (sp [0]->type != STACK_OBJ)
11934 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11936 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11937 CHECK_TYPELOAD (klass);
11938 /* we need to make sure that this array is exactly the type it needs
11939 * to be for correctness. the wrappers are lax with their usage
11940 * so we need to ignore them here
11942 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11943 MonoClass *array_class = mono_array_class_get (klass, 1);
11944 mini_emit_check_array_type (cfg, sp [0], array_class);
11945 CHECK_TYPELOAD (array_class);
11949 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11954 case CEE_LDELEM_I1:
11955 case CEE_LDELEM_U1:
11956 case CEE_LDELEM_I2:
11957 case CEE_LDELEM_U2:
11958 case CEE_LDELEM_I4:
11959 case CEE_LDELEM_U4:
11960 case CEE_LDELEM_I8:
11962 case CEE_LDELEM_R4:
11963 case CEE_LDELEM_R8:
11964 case CEE_LDELEM_REF: {
11970 if (*ip == CEE_LDELEM) {
11972 token = read32 (ip + 1);
11973 klass = mini_get_class (method, token, generic_context);
11974 CHECK_TYPELOAD (klass);
11975 mono_class_init (klass);
11978 klass = array_access_to_klass (*ip);
11980 if (sp [0]->type != STACK_OBJ)
11983 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11985 if (mini_is_gsharedvt_variable_klass (klass)) {
11986 // FIXME-VT: OP_ICONST optimization
11987 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11988 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11989 ins->opcode = OP_LOADV_MEMBASE;
11990 } else if (sp [1]->opcode == OP_ICONST) {
11991 int array_reg = sp [0]->dreg;
11992 int index_reg = sp [1]->dreg;
11993 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11995 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11996 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11998 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11999 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12001 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12005 if (*ip == CEE_LDELEM)
12012 case CEE_STELEM_I1:
12013 case CEE_STELEM_I2:
12014 case CEE_STELEM_I4:
12015 case CEE_STELEM_I8:
12016 case CEE_STELEM_R4:
12017 case CEE_STELEM_R8:
12018 case CEE_STELEM_REF:
12023 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12025 if (*ip == CEE_STELEM) {
12027 token = read32 (ip + 1);
12028 klass = mini_get_class (method, token, generic_context);
12029 CHECK_TYPELOAD (klass);
12030 mono_class_init (klass);
12033 klass = array_access_to_klass (*ip);
12035 if (sp [0]->type != STACK_OBJ)
12038 emit_array_store (cfg, klass, sp, TRUE);
12040 if (*ip == CEE_STELEM)
12047 case CEE_CKFINITE: {
12051 if (cfg->llvm_only) {
12052 MonoInst *iargs [1];
12054 iargs [0] = sp [0];
12055 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12057 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12058 ins->sreg1 = sp [0]->dreg;
12059 ins->dreg = alloc_freg (cfg);
12060 ins->type = STACK_R8;
12061 MONO_ADD_INS (cfg->cbb, ins);
12063 *sp++ = mono_decompose_opcode (cfg, ins);
12069 case CEE_REFANYVAL: {
12070 MonoInst *src_var, *src;
12072 int klass_reg = alloc_preg (cfg);
12073 int dreg = alloc_preg (cfg);
12075 GSHAREDVT_FAILURE (*ip);
12078 MONO_INST_NEW (cfg, ins, *ip);
12081 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12082 CHECK_TYPELOAD (klass);
12084 context_used = mini_class_check_context_used (cfg, klass);
12087 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12089 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12090 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12093 if (context_used) {
12094 MonoInst *klass_ins;
12096 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12097 klass, MONO_RGCTX_INFO_KLASS);
12100 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12101 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12103 mini_emit_class_check (cfg, klass_reg, klass);
12105 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12106 ins->type = STACK_MP;
12107 ins->klass = klass;
12112 case CEE_MKREFANY: {
12113 MonoInst *loc, *addr;
12115 GSHAREDVT_FAILURE (*ip);
12118 MONO_INST_NEW (cfg, ins, *ip);
12121 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12122 CHECK_TYPELOAD (klass);
12124 context_used = mini_class_check_context_used (cfg, klass);
12126 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12127 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12129 if (context_used) {
12130 MonoInst *const_ins;
12131 int type_reg = alloc_preg (cfg);
12133 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12136 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12137 } else if (cfg->compile_aot) {
12138 int const_reg = alloc_preg (cfg);
12139 int type_reg = alloc_preg (cfg);
12141 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12142 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12146 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12147 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12149 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12151 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12152 ins->type = STACK_VTYPE;
12153 ins->klass = mono_defaults.typed_reference_class;
12158 case CEE_LDTOKEN: {
12160 MonoClass *handle_class;
12162 CHECK_STACK_OVF (1);
12165 n = read32 (ip + 1);
12167 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12168 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12169 handle = mono_method_get_wrapper_data (method, n);
12170 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12171 if (handle_class == mono_defaults.typehandle_class)
12172 handle = &((MonoClass*)handle)->byval_arg;
12175 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12180 mono_class_init (handle_class);
12181 if (cfg->gshared) {
12182 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12183 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12184 /* This case handles ldtoken
12185 of an open type, like for
12188 } else if (handle_class == mono_defaults.typehandle_class) {
12189 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12190 } else if (handle_class == mono_defaults.fieldhandle_class)
12191 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12192 else if (handle_class == mono_defaults.methodhandle_class)
12193 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12195 g_assert_not_reached ();
12198 if ((cfg->opt & MONO_OPT_SHARED) &&
12199 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12200 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12201 MonoInst *addr, *vtvar, *iargs [3];
12202 int method_context_used;
12204 method_context_used = mini_method_check_context_used (cfg, method);
12206 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12208 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12209 EMIT_NEW_ICONST (cfg, iargs [1], n);
12210 if (method_context_used) {
12211 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12212 method, MONO_RGCTX_INFO_METHOD);
12213 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12215 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12216 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12218 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12222 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12224 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12225 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12226 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12227 (cmethod->klass == mono_defaults.systemtype_class) &&
12228 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12229 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12231 mono_class_init (tclass);
12232 if (context_used) {
12233 ins = emit_get_rgctx_klass (cfg, context_used,
12234 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12235 } else if (cfg->compile_aot) {
12236 if (method->wrapper_type) {
12237 mono_error_init (&error); //got to do it since there are multiple conditionals below
12238 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12239 /* Special case for static synchronized wrappers */
12240 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12242 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12243 /* FIXME: n is not a normal token */
12245 EMIT_NEW_PCONST (cfg, ins, NULL);
12248 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12252 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &error);
12253 mono_error_raise_exception (&error); /* FIXME don't raise here */
12255 EMIT_NEW_PCONST (cfg, ins, rt);
12257 ins->type = STACK_OBJ;
12258 ins->klass = cmethod->klass;
12261 MonoInst *addr, *vtvar;
12263 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12265 if (context_used) {
12266 if (handle_class == mono_defaults.typehandle_class) {
12267 ins = emit_get_rgctx_klass (cfg, context_used,
12268 mono_class_from_mono_type ((MonoType *)handle),
12269 MONO_RGCTX_INFO_TYPE);
12270 } else if (handle_class == mono_defaults.methodhandle_class) {
12271 ins = emit_get_rgctx_method (cfg, context_used,
12272 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12273 } else if (handle_class == mono_defaults.fieldhandle_class) {
12274 ins = emit_get_rgctx_field (cfg, context_used,
12275 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12277 g_assert_not_reached ();
12279 } else if (cfg->compile_aot) {
12280 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12282 EMIT_NEW_PCONST (cfg, ins, handle);
12284 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12285 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12286 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12296 MONO_INST_NEW (cfg, ins, OP_THROW);
12298 ins->sreg1 = sp [0]->dreg;
12300 cfg->cbb->out_of_line = TRUE;
12301 MONO_ADD_INS (cfg->cbb, ins);
12302 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12303 MONO_ADD_INS (cfg->cbb, ins);
12306 link_bblock (cfg, cfg->cbb, end_bblock);
12307 start_new_bblock = 1;
12308 /* This can complicate code generation for llvm since the return value might not be defined */
12309 if (COMPILE_LLVM (cfg))
12310 INLINE_FAILURE ("throw");
12312 case CEE_ENDFINALLY:
12313 /* mono_save_seq_point_info () depends on this */
12314 if (sp != stack_start)
12315 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12316 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12317 MONO_ADD_INS (cfg->cbb, ins);
12319 start_new_bblock = 1;
12322 * Control will leave the method so empty the stack, otherwise
12323 * the next basic block will start with a nonempty stack.
12325 while (sp != stack_start) {
12330 case CEE_LEAVE_S: {
12333 if (*ip == CEE_LEAVE) {
12335 target = ip + 5 + (gint32)read32(ip + 1);
12338 target = ip + 2 + (signed char)(ip [1]);
12341 /* empty the stack */
12342 while (sp != stack_start) {
12347 * If this leave statement is in a catch block, check for a
12348 * pending exception, and rethrow it if necessary.
12349 * We avoid doing this in runtime invoke wrappers, since those are called
12350 * by native code which expects the wrapper to catch all exceptions.
12352 for (i = 0; i < header->num_clauses; ++i) {
12353 MonoExceptionClause *clause = &header->clauses [i];
12356 * Use <= in the final comparison to handle clauses with multiple
12357 * leave statements, like in bug #78024.
12358 * The ordering of the exception clauses guarantees that we find the
12359 * innermost clause.
12361 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12363 MonoBasicBlock *dont_throw;
12368 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12371 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12373 NEW_BBLOCK (cfg, dont_throw);
12376 * Currently, we always rethrow the abort exception, despite the
12377 * fact that this is not correct. See thread6.cs for an example.
12378 * But propagating the abort exception is more important than
12379 * getting the semantics right.
12381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12383 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12385 MONO_START_BB (cfg, dont_throw);
12390 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12393 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12395 MonoExceptionClause *clause;
12397 for (tmp = handlers; tmp; tmp = tmp->next) {
12398 clause = (MonoExceptionClause *)tmp->data;
12399 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12401 link_bblock (cfg, cfg->cbb, tblock);
12402 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12403 ins->inst_target_bb = tblock;
12404 ins->inst_eh_block = clause;
12405 MONO_ADD_INS (cfg->cbb, ins);
12406 cfg->cbb->has_call_handler = 1;
12407 if (COMPILE_LLVM (cfg)) {
12408 MonoBasicBlock *target_bb;
12411 * Link the finally bblock with the target, since it will
12412 * conceptually branch there.
12414 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12415 GET_BBLOCK (cfg, target_bb, target);
12416 link_bblock (cfg, tblock, target_bb);
12419 g_list_free (handlers);
12422 MONO_INST_NEW (cfg, ins, OP_BR);
12423 MONO_ADD_INS (cfg->cbb, ins);
12424 GET_BBLOCK (cfg, tblock, target);
12425 link_bblock (cfg, cfg->cbb, tblock);
12426 ins->inst_target_bb = tblock;
12428 start_new_bblock = 1;
12430 if (*ip == CEE_LEAVE)
12439 * Mono specific opcodes
12441 case MONO_CUSTOM_PREFIX: {
12443 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12447 case CEE_MONO_ICALL: {
12449 MonoJitICallInfo *info;
12451 token = read32 (ip + 2);
12452 func = mono_method_get_wrapper_data (method, token);
12453 info = mono_find_jit_icall_by_addr (func);
12455 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12458 CHECK_STACK (info->sig->param_count);
12459 sp -= info->sig->param_count;
12461 ins = mono_emit_jit_icall (cfg, info->func, sp);
12462 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12466 inline_costs += 10 * num_calls++;
12470 case CEE_MONO_LDPTR_CARD_TABLE:
12471 case CEE_MONO_LDPTR_NURSERY_START:
12472 case CEE_MONO_LDPTR_NURSERY_BITS:
12473 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12474 CHECK_STACK_OVF (1);
12477 case CEE_MONO_LDPTR_CARD_TABLE:
12478 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12480 case CEE_MONO_LDPTR_NURSERY_START:
12481 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12483 case CEE_MONO_LDPTR_NURSERY_BITS:
12484 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12486 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12487 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12493 inline_costs += 10 * num_calls++;
12496 case CEE_MONO_LDPTR: {
12499 CHECK_STACK_OVF (1);
12501 token = read32 (ip + 2);
12503 ptr = mono_method_get_wrapper_data (method, token);
12504 EMIT_NEW_PCONST (cfg, ins, ptr);
12507 inline_costs += 10 * num_calls++;
12508 /* Can't embed random pointers into AOT code */
12512 case CEE_MONO_JIT_ICALL_ADDR: {
12513 MonoJitICallInfo *callinfo;
12516 CHECK_STACK_OVF (1);
12518 token = read32 (ip + 2);
12520 ptr = mono_method_get_wrapper_data (method, token);
12521 callinfo = mono_find_jit_icall_by_addr (ptr);
12522 g_assert (callinfo);
12523 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12526 inline_costs += 10 * num_calls++;
12529 case CEE_MONO_ICALL_ADDR: {
12530 MonoMethod *cmethod;
12533 CHECK_STACK_OVF (1);
12535 token = read32 (ip + 2);
12537 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12539 if (cfg->compile_aot) {
12540 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12542 ptr = mono_lookup_internal_call (cmethod);
12544 EMIT_NEW_PCONST (cfg, ins, ptr);
12550 case CEE_MONO_VTADDR: {
12551 MonoInst *src_var, *src;
12557 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12558 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12563 case CEE_MONO_NEWOBJ: {
12564 MonoInst *iargs [2];
12566 CHECK_STACK_OVF (1);
12568 token = read32 (ip + 2);
12569 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12570 mono_class_init (klass);
12571 NEW_DOMAINCONST (cfg, iargs [0]);
12572 MONO_ADD_INS (cfg->cbb, iargs [0]);
12573 NEW_CLASSCONST (cfg, iargs [1], klass);
12574 MONO_ADD_INS (cfg->cbb, iargs [1]);
12575 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12577 inline_costs += 10 * num_calls++;
12580 case CEE_MONO_OBJADDR:
12583 MONO_INST_NEW (cfg, ins, OP_MOVE);
12584 ins->dreg = alloc_ireg_mp (cfg);
12585 ins->sreg1 = sp [0]->dreg;
12586 ins->type = STACK_MP;
12587 MONO_ADD_INS (cfg->cbb, ins);
12591 case CEE_MONO_LDNATIVEOBJ:
12593 * Similar to LDOBJ, but instead load the unmanaged
12594 * representation of the vtype to the stack.
12599 token = read32 (ip + 2);
12600 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12601 g_assert (klass->valuetype);
12602 mono_class_init (klass);
12605 MonoInst *src, *dest, *temp;
12608 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12609 temp->backend.is_pinvoke = 1;
12610 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12611 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12613 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12614 dest->type = STACK_VTYPE;
12615 dest->klass = klass;
12621 case CEE_MONO_RETOBJ: {
12623 * Same as RET, but return the native representation of a vtype
12626 g_assert (cfg->ret);
12627 g_assert (mono_method_signature (method)->pinvoke);
12632 token = read32 (ip + 2);
12633 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12635 if (!cfg->vret_addr) {
12636 g_assert (cfg->ret_var_is_local);
12638 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12640 EMIT_NEW_RETLOADA (cfg, ins);
12642 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12644 if (sp != stack_start)
12647 MONO_INST_NEW (cfg, ins, OP_BR);
12648 ins->inst_target_bb = end_bblock;
12649 MONO_ADD_INS (cfg->cbb, ins);
12650 link_bblock (cfg, cfg->cbb, end_bblock);
12651 start_new_bblock = 1;
12655 case CEE_MONO_CISINST:
12656 case CEE_MONO_CCASTCLASS: {
12661 token = read32 (ip + 2);
12662 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12663 if (ip [1] == CEE_MONO_CISINST)
12664 ins = handle_cisinst (cfg, klass, sp [0]);
12666 ins = handle_ccastclass (cfg, klass, sp [0]);
12671 case CEE_MONO_SAVE_LMF:
12672 case CEE_MONO_RESTORE_LMF:
12675 case CEE_MONO_CLASSCONST:
12676 CHECK_STACK_OVF (1);
12678 token = read32 (ip + 2);
12679 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12682 inline_costs += 10 * num_calls++;
12684 case CEE_MONO_NOT_TAKEN:
12685 cfg->cbb->out_of_line = TRUE;
12688 case CEE_MONO_TLS: {
12691 CHECK_STACK_OVF (1);
12693 key = (MonoTlsKey)read32 (ip + 2);
12694 g_assert (key < TLS_KEY_NUM);
12696 ins = mono_create_tls_get (cfg, key);
12698 if (cfg->compile_aot) {
12700 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12701 ins->dreg = alloc_preg (cfg);
12702 ins->type = STACK_PTR;
12704 g_assert_not_reached ();
12707 ins->type = STACK_PTR;
12708 MONO_ADD_INS (cfg->cbb, ins);
12713 case CEE_MONO_DYN_CALL: {
12714 MonoCallInst *call;
12716 /* It would be easier to call a trampoline, but that would put an
12717 * extra frame on the stack, confusing exception handling. So
12718 * implement it inline using an opcode for now.
12721 if (!cfg->dyn_call_var) {
12722 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12723 /* prevent it from being register allocated */
12724 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12727 /* Has to use a call inst since the local regalloc expects it */
12728 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12729 ins = (MonoInst*)call;
12731 ins->sreg1 = sp [0]->dreg;
12732 ins->sreg2 = sp [1]->dreg;
12733 MONO_ADD_INS (cfg->cbb, ins);
12735 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12738 inline_costs += 10 * num_calls++;
12742 case CEE_MONO_MEMORY_BARRIER: {
12744 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12748 case CEE_MONO_JIT_ATTACH: {
12749 MonoInst *args [16], *domain_ins;
12750 MonoInst *ad_ins, *jit_tls_ins;
12751 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12753 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12755 EMIT_NEW_PCONST (cfg, ins, NULL);
12756 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12758 ad_ins = mono_get_domain_intrinsic (cfg);
12759 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12761 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12762 NEW_BBLOCK (cfg, next_bb);
12763 NEW_BBLOCK (cfg, call_bb);
12765 if (cfg->compile_aot) {
12766 /* AOT code is only used in the root domain */
12767 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12769 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12771 MONO_ADD_INS (cfg->cbb, ad_ins);
12772 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12773 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12775 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12776 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12780 MONO_START_BB (cfg, call_bb);
12783 if (cfg->compile_aot) {
12784 /* AOT code is only used in the root domain */
12785 EMIT_NEW_PCONST (cfg, args [0], NULL);
12787 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12789 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12790 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12793 MONO_START_BB (cfg, next_bb);
12797 case CEE_MONO_JIT_DETACH: {
12798 MonoInst *args [16];
12800 /* Restore the original domain */
12801 dreg = alloc_ireg (cfg);
12802 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12803 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12807 case CEE_MONO_CALLI_EXTRA_ARG: {
12809 MonoMethodSignature *fsig;
12813 * This is the same as CEE_CALLI, but passes an additional argument
12814 * to the called method in llvmonly mode.
12815 * This is only used by delegate invoke wrappers to call the
12816 * actual delegate method.
12818 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12821 token = read32 (ip + 2);
12829 fsig = mini_get_signature (method, token, generic_context);
12831 if (cfg->llvm_only)
12832 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12834 n = fsig->param_count + fsig->hasthis + 1;
12841 if (cfg->llvm_only) {
12843 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12844 * cconv. This is set by mono_init_delegate ().
12846 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12847 MonoInst *callee = addr;
12848 MonoInst *call, *localloc_ins;
12849 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12850 int low_bit_reg = alloc_preg (cfg);
12852 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12853 NEW_BBLOCK (cfg, end_bb);
12855 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12856 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12859 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12860 addr = emit_get_rgctx_sig (cfg, context_used,
12861 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12863 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12865 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12866 ins->dreg = alloc_preg (cfg);
12867 ins->inst_imm = 2 * SIZEOF_VOID_P;
12868 MONO_ADD_INS (cfg->cbb, ins);
12869 localloc_ins = ins;
12870 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12874 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12877 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12878 MONO_START_BB (cfg, is_gsharedvt_bb);
12879 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12880 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12881 ins->dreg = call->dreg;
12883 MONO_START_BB (cfg, end_bb);
12885 /* Caller uses a normal calling conv */
12887 MonoInst *callee = addr;
12888 MonoInst *call, *localloc_ins;
12889 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12890 int low_bit_reg = alloc_preg (cfg);
12892 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12893 NEW_BBLOCK (cfg, end_bb);
12895 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12897 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12899 /* Normal case: callee uses a normal cconv, no conversion is needed */
12900 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12901 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12902 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12903 MONO_START_BB (cfg, is_gsharedvt_bb);
12904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12905 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12906 MONO_ADD_INS (cfg->cbb, addr);
12908 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12910 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12911 ins->dreg = alloc_preg (cfg);
12912 ins->inst_imm = 2 * SIZEOF_VOID_P;
12913 MONO_ADD_INS (cfg->cbb, ins);
12914 localloc_ins = ins;
12915 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12919 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12920 ins->dreg = call->dreg;
12921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12923 MONO_START_BB (cfg, end_bb);
12926 /* Same as CEE_CALLI */
12927 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12929 * We pass the address to the gsharedvt trampoline in the rgctx reg
12931 MonoInst *callee = addr;
12933 addr = emit_get_rgctx_sig (cfg, context_used,
12934 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12935 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12937 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12941 if (!MONO_TYPE_IS_VOID (fsig->ret))
12942 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12944 CHECK_CFG_EXCEPTION;
12948 constrained_class = NULL;
12952 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12958 case CEE_PREFIX1: {
12961 case CEE_ARGLIST: {
12962 /* somewhat similar to LDTOKEN */
12963 MonoInst *addr, *vtvar;
12964 CHECK_STACK_OVF (1);
12965 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12967 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12968 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12970 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12971 ins->type = STACK_VTYPE;
12972 ins->klass = mono_defaults.argumenthandle_class;
12982 MonoInst *cmp, *arg1, *arg2;
12990 * The following transforms:
12991 * CEE_CEQ into OP_CEQ
12992 * CEE_CGT into OP_CGT
12993 * CEE_CGT_UN into OP_CGT_UN
12994 * CEE_CLT into OP_CLT
12995 * CEE_CLT_UN into OP_CLT_UN
12997 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12999 MONO_INST_NEW (cfg, ins, cmp->opcode);
13000 cmp->sreg1 = arg1->dreg;
13001 cmp->sreg2 = arg2->dreg;
13002 type_from_op (cfg, cmp, arg1, arg2);
13004 add_widen_op (cfg, cmp, &arg1, &arg2);
13005 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13006 cmp->opcode = OP_LCOMPARE;
13007 else if (arg1->type == STACK_R4)
13008 cmp->opcode = OP_RCOMPARE;
13009 else if (arg1->type == STACK_R8)
13010 cmp->opcode = OP_FCOMPARE;
13012 cmp->opcode = OP_ICOMPARE;
13013 MONO_ADD_INS (cfg->cbb, cmp);
13014 ins->type = STACK_I4;
13015 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13016 type_from_op (cfg, ins, arg1, arg2);
13018 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13020 * The backends expect the fceq opcodes to do the
13023 ins->sreg1 = cmp->sreg1;
13024 ins->sreg2 = cmp->sreg2;
13027 MONO_ADD_INS (cfg->cbb, ins);
13033 MonoInst *argconst;
13034 MonoMethod *cil_method;
13036 CHECK_STACK_OVF (1);
13038 n = read32 (ip + 2);
13039 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13042 mono_class_init (cmethod->klass);
13044 mono_save_token_info (cfg, image, n, cmethod);
13046 context_used = mini_method_check_context_used (cfg, cmethod);
13048 cil_method = cmethod;
13049 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13050 METHOD_ACCESS_FAILURE (method, cil_method);
13052 if (mono_security_core_clr_enabled ())
13053 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13056 * Optimize the common case of ldftn+delegate creation
13058 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13059 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13060 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13061 MonoInst *target_ins, *handle_ins;
13062 MonoMethod *invoke;
13063 int invoke_context_used;
13065 invoke = mono_get_delegate_invoke (ctor_method->klass);
13066 if (!invoke || !mono_method_signature (invoke))
13069 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13071 target_ins = sp [-1];
13073 if (mono_security_core_clr_enabled ())
13074 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13076 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13077 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13078 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13080 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13084 /* FIXME: SGEN support */
13085 if (invoke_context_used == 0 || cfg->llvm_only) {
13087 if (cfg->verbose_level > 3)
13088 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13089 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13092 CHECK_CFG_EXCEPTION;
13102 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13103 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13107 inline_costs += 10 * num_calls++;
13110 case CEE_LDVIRTFTN: {
13111 MonoInst *args [2];
13115 n = read32 (ip + 2);
13116 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13119 mono_class_init (cmethod->klass);
13121 context_used = mini_method_check_context_used (cfg, cmethod);
13123 if (mono_security_core_clr_enabled ())
13124 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13127 * Optimize the common case of ldvirtftn+delegate creation
13129 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13130 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13131 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13132 MonoInst *target_ins, *handle_ins;
13133 MonoMethod *invoke;
13134 int invoke_context_used;
13135 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13137 invoke = mono_get_delegate_invoke (ctor_method->klass);
13138 if (!invoke || !mono_method_signature (invoke))
13141 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13143 target_ins = sp [-1];
13145 if (mono_security_core_clr_enabled ())
13146 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13148 /* FIXME: SGEN support */
13149 if (invoke_context_used == 0 || cfg->llvm_only) {
13151 if (cfg->verbose_level > 3)
13152 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13153 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13156 CHECK_CFG_EXCEPTION;
13169 args [1] = emit_get_rgctx_method (cfg, context_used,
13170 cmethod, MONO_RGCTX_INFO_METHOD);
13173 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13175 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13178 inline_costs += 10 * num_calls++;
13182 CHECK_STACK_OVF (1);
13184 n = read16 (ip + 2);
13186 EMIT_NEW_ARGLOAD (cfg, ins, n);
13191 CHECK_STACK_OVF (1);
13193 n = read16 (ip + 2);
13195 NEW_ARGLOADA (cfg, ins, n);
13196 MONO_ADD_INS (cfg->cbb, ins);
13204 n = read16 (ip + 2);
13206 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13208 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13212 CHECK_STACK_OVF (1);
13214 n = read16 (ip + 2);
13216 EMIT_NEW_LOCLOAD (cfg, ins, n);
13221 unsigned char *tmp_ip;
13222 CHECK_STACK_OVF (1);
13224 n = read16 (ip + 2);
13227 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13233 EMIT_NEW_LOCLOADA (cfg, ins, n);
13242 n = read16 (ip + 2);
13244 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13246 emit_stloc_ir (cfg, sp, header, n);
13253 if (sp != stack_start)
13255 if (cfg->method != method)
13257 * Inlining this into a loop in a parent could lead to
13258 * stack overflows which is different behavior than the
13259 * non-inlined case, thus disable inlining in this case.
13261 INLINE_FAILURE("localloc");
13263 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13264 ins->dreg = alloc_preg (cfg);
13265 ins->sreg1 = sp [0]->dreg;
13266 ins->type = STACK_PTR;
13267 MONO_ADD_INS (cfg->cbb, ins);
13269 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13271 ins->flags |= MONO_INST_INIT;
13276 case CEE_ENDFILTER: {
13277 MonoExceptionClause *clause, *nearest;
13282 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13284 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13285 ins->sreg1 = (*sp)->dreg;
13286 MONO_ADD_INS (cfg->cbb, ins);
13287 start_new_bblock = 1;
13291 for (cc = 0; cc < header->num_clauses; ++cc) {
13292 clause = &header->clauses [cc];
13293 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13294 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13295 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13298 g_assert (nearest);
13299 if ((ip - header->code) != nearest->handler_offset)
13304 case CEE_UNALIGNED_:
13305 ins_flag |= MONO_INST_UNALIGNED;
13306 /* FIXME: record alignment? we can assume 1 for now */
13310 case CEE_VOLATILE_:
13311 ins_flag |= MONO_INST_VOLATILE;
13315 ins_flag |= MONO_INST_TAILCALL;
13316 cfg->flags |= MONO_CFG_HAS_TAIL;
13317 /* Can't inline tail calls at this time */
13318 inline_costs += 100000;
13325 token = read32 (ip + 2);
13326 klass = mini_get_class (method, token, generic_context);
13327 CHECK_TYPELOAD (klass);
13328 if (generic_class_is_reference_type (cfg, klass))
13329 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13331 mini_emit_initobj (cfg, *sp, NULL, klass);
13335 case CEE_CONSTRAINED_:
13337 token = read32 (ip + 2);
13338 constrained_class = mini_get_class (method, token, generic_context);
13339 CHECK_TYPELOAD (constrained_class);
13343 case CEE_INITBLK: {
13344 MonoInst *iargs [3];
13348 /* Skip optimized paths for volatile operations. */
13349 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13350 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13351 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13352 /* emit_memset only works when val == 0 */
13353 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13356 iargs [0] = sp [0];
13357 iargs [1] = sp [1];
13358 iargs [2] = sp [2];
13359 if (ip [1] == CEE_CPBLK) {
13361 * FIXME: It's unclear whether we should be emitting both the acquire
13362 * and release barriers for cpblk. It is technically both a load and
13363 * store operation, so it seems like that's the sensible thing to do.
13365 * FIXME: We emit full barriers on both sides of the operation for
13366 * simplicity. We should have a separate atomic memcpy method instead.
13368 MonoMethod *memcpy_method = get_memcpy_method ();
13370 if (ins_flag & MONO_INST_VOLATILE)
13371 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13373 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13374 call->flags |= ins_flag;
13376 if (ins_flag & MONO_INST_VOLATILE)
13377 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13379 MonoMethod *memset_method = get_memset_method ();
13380 if (ins_flag & MONO_INST_VOLATILE) {
13381 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13382 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13384 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13385 call->flags |= ins_flag;
13396 ins_flag |= MONO_INST_NOTYPECHECK;
13398 ins_flag |= MONO_INST_NORANGECHECK;
13399 /* we ignore the no-nullcheck for now since we
13400 * really do it explicitly only when doing callvirt->call
13404 case CEE_RETHROW: {
13406 int handler_offset = -1;
13408 for (i = 0; i < header->num_clauses; ++i) {
13409 MonoExceptionClause *clause = &header->clauses [i];
13410 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13411 handler_offset = clause->handler_offset;
13416 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13418 if (handler_offset == -1)
13421 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13422 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13423 ins->sreg1 = load->dreg;
13424 MONO_ADD_INS (cfg->cbb, ins);
13426 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13427 MONO_ADD_INS (cfg->cbb, ins);
13430 link_bblock (cfg, cfg->cbb, end_bblock);
13431 start_new_bblock = 1;
13439 CHECK_STACK_OVF (1);
13441 token = read32 (ip + 2);
13442 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13443 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13446 val = mono_type_size (type, &ialign);
13448 MonoClass *klass = mini_get_class (method, token, generic_context);
13449 CHECK_TYPELOAD (klass);
13451 val = mono_type_size (&klass->byval_arg, &ialign);
13453 if (mini_is_gsharedvt_klass (klass))
13454 GSHAREDVT_FAILURE (*ip);
13456 EMIT_NEW_ICONST (cfg, ins, val);
13461 case CEE_REFANYTYPE: {
13462 MonoInst *src_var, *src;
13464 GSHAREDVT_FAILURE (*ip);
13470 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13472 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13473 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13474 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13479 case CEE_READONLY_:
13492 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13502 g_warning ("opcode 0x%02x not handled", *ip);
13506 if (start_new_bblock != 1)
13509 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13510 if (cfg->cbb->next_bb) {
13511 /* This could already be set because of inlining, #693905 */
13512 MonoBasicBlock *bb = cfg->cbb;
13514 while (bb->next_bb)
13516 bb->next_bb = end_bblock;
13518 cfg->cbb->next_bb = end_bblock;
13521 if (cfg->method == method && cfg->domainvar) {
13523 MonoInst *get_domain;
13525 cfg->cbb = init_localsbb;
13527 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13528 MONO_ADD_INS (cfg->cbb, get_domain);
13530 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13532 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13533 MONO_ADD_INS (cfg->cbb, store);
13536 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13537 if (cfg->compile_aot)
13538 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13539 mono_get_got_var (cfg);
13542 if (cfg->method == method && cfg->got_var)
13543 mono_emit_load_got_addr (cfg);
13545 if (init_localsbb) {
13546 cfg->cbb = init_localsbb;
13548 for (i = 0; i < header->num_locals; ++i) {
13549 emit_init_local (cfg, i, header->locals [i], init_locals);
13553 if (cfg->init_ref_vars && cfg->method == method) {
13554 /* Emit initialization for ref vars */
13555 // FIXME: Avoid duplication initialization for IL locals.
13556 for (i = 0; i < cfg->num_varinfo; ++i) {
13557 MonoInst *ins = cfg->varinfo [i];
13559 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13560 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13564 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13565 cfg->cbb = init_localsbb;
13566 emit_push_lmf (cfg);
13569 cfg->cbb = init_localsbb;
13570 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13573 MonoBasicBlock *bb;
13576 * Make seq points at backward branch targets interruptable.
13578 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13579 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13580 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13583 /* Add a sequence point for method entry/exit events */
13584 if (seq_points && cfg->gen_sdb_seq_points) {
13585 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13586 MONO_ADD_INS (init_localsbb, ins);
13587 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13588 MONO_ADD_INS (cfg->bb_exit, ins);
13592 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13593 * the code they refer to was dead (#11880).
13595 if (sym_seq_points) {
13596 for (i = 0; i < header->code_size; ++i) {
13597 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13600 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13601 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13608 if (cfg->method == method) {
13609 MonoBasicBlock *bb;
13610 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13611 bb->region = mono_find_block_region (cfg, bb->real_offset);
13613 mono_create_spvar_for_region (cfg, bb->region);
13614 if (cfg->verbose_level > 2)
13615 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13619 if (inline_costs < 0) {
13622 /* Method is too large */
13623 mname = mono_method_full_name (method, TRUE);
13624 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13628 if ((cfg->verbose_level > 2) && (cfg->method == method))
13629 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13634 g_assert (!mono_error_ok (&cfg->error));
13638 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13642 set_exception_type_from_invalid_il (cfg, method, ip);
13646 g_slist_free (class_inits);
13647 mono_basic_block_free (original_bb);
13648 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13649 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13650 if (cfg->exception_type)
13653 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart, so a
 * register store whose source turns out to be a constant can be rewritten
 * as an immediate store. Asserts if the opcode has no immediate form.
 * NOTE(review): some lines of this function were elided by extraction
 * (the switch header/closing braces are not visible here).
 */
13657 store_membase_reg_to_store_membase_imm (int opcode)
13660 case OP_STORE_MEMBASE_REG:
13661 return OP_STORE_MEMBASE_IMM;
13662 case OP_STOREI1_MEMBASE_REG:
13663 return OP_STOREI1_MEMBASE_IMM;
13664 case OP_STOREI2_MEMBASE_REG:
13665 return OP_STOREI2_MEMBASE_IMM;
13666 case OP_STOREI4_MEMBASE_REG:
13667 return OP_STOREI4_MEMBASE_IMM;
13668 case OP_STOREI8_MEMBASE_REG:
13669 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes */
13671 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store/call opcode to the variant which
 * takes an immediate second operand, enabling constant folding into the
 * instruction itself. Covers 32 bit and 64 bit arithmetic, comparisons,
 * membase stores, and (on x86/amd64) push/compare forms.
 * NOTE(review): the case labels for most entries were elided by extraction;
 * only the return statements are visible in this chunk.
 */
13678 mono_op_to_op_imm (int opcode)
13682 return OP_IADD_IMM;
13684 return OP_ISUB_IMM;
13686 return OP_IDIV_IMM;
13688 return OP_IDIV_UN_IMM;
13690 return OP_IREM_IMM;
13692 return OP_IREM_UN_IMM;
13694 return OP_IMUL_IMM;
13696 return OP_IAND_IMM;
13700 return OP_IXOR_IMM;
13702 return OP_ISHL_IMM;
13704 return OP_ISHR_IMM;
13706 return OP_ISHR_UN_IMM;
/* 64 bit (long) variants */
13709 return OP_LADD_IMM;
13711 return OP_LSUB_IMM;
13713 return OP_LAND_IMM;
13717 return OP_LXOR_IMM;
13719 return OP_LSHL_IMM;
13721 return OP_LSHR_IMM;
13723 return OP_LSHR_UN_IMM;
/* LREM has an immediate form only on 64 bit registers (emulated otherwise) */
13724 #if SIZEOF_REGISTER == 8
13726 return OP_LREM_IMM;
13730 return OP_COMPARE_IMM;
13732 return OP_ICOMPARE_IMM;
13734 return OP_LCOMPARE_IMM;
13736 case OP_STORE_MEMBASE_REG:
13737 return OP_STORE_MEMBASE_IMM;
13738 case OP_STOREI1_MEMBASE_REG:
13739 return OP_STOREI1_MEMBASE_IMM;
13740 case OP_STOREI2_MEMBASE_REG:
13741 return OP_STOREI2_MEMBASE_IMM;
13742 case OP_STOREI4_MEMBASE_REG:
13743 return OP_STOREI4_MEMBASE_IMM;
/* Architecture specific immediate forms */
13745 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13747 return OP_X86_PUSH_IMM;
13748 case OP_X86_COMPARE_MEMBASE_REG:
13749 return OP_X86_COMPARE_MEMBASE_IMM;
13751 #if defined(TARGET_AMD64)
13752 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13753 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a register with a constant target becomes a direct call */
13755 case OP_VOIDCALL_REG:
13756 return OP_VOIDCALL;
13764 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * JIT OP_LOAD*_MEMBASE opcode. Asserts on unknown opcodes.
 * NOTE(review): most case labels were elided by extraction; only the
 * returns (and two visible cases) remain in this chunk.
 */
13771 ldind_to_load_membase (int opcode)
13775 return OP_LOADI1_MEMBASE;
13777 return OP_LOADU1_MEMBASE;
13779 return OP_LOADI2_MEMBASE;
13781 return OP_LOADU2_MEMBASE;
13783 return OP_LOADI4_MEMBASE;
13785 return OP_LOADU4_MEMBASE;
13787 return OP_LOAD_MEMBASE;
/* Object references use the native-word load */
13788 case CEE_LDIND_REF:
13789 return OP_LOAD_MEMBASE;
13791 return OP_LOADI8_MEMBASE;
13793 return OP_LOADR4_MEMBASE;
13795 return OP_LOADR8_MEMBASE;
13797 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * JIT OP_STORE*_MEMBASE_REG opcode. Asserts on unknown opcodes.
 * NOTE(review): most case labels were elided by extraction.
 */
13804 stind_to_store_membase (int opcode)
13808 return OP_STOREI1_MEMBASE_REG;
13810 return OP_STOREI2_MEMBASE_REG;
13812 return OP_STOREI4_MEMBASE_REG;
/* Object references use the native-word store */
13814 case CEE_STIND_REF:
13815 return OP_STORE_MEMBASE_REG;
13817 return OP_STOREI8_MEMBASE_REG;
13819 return OP_STORER4_MEMBASE_REG;
13821 return OP_STORER8_MEMBASE_REG;
13823 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_*_MEMBASE) to the absolute-address form
 * (OP_*_MEM), available only on x86/amd64 (see the FIXME about a
 * MONO_ARCH_HAVE_LOAD_MEM capability macro). Presumably returns a
 * "no mapping" sentinel on other targets/opcodes — the fall-through
 * lines are not visible in this chunk; verify against upstream.
 */
13830 mono_load_membase_to_load_mem (int opcode)
13832 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13833 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13835 case OP_LOAD_MEMBASE:
13836 return OP_LOAD_MEM;
13837 case OP_LOADU1_MEMBASE:
13838 return OP_LOADU1_MEM;
13839 case OP_LOADU2_MEMBASE:
13840 return OP_LOADU2_MEM;
13841 case OP_LOADI4_MEMBASE:
13842 return OP_LOADI4_MEM;
13843 case OP_LOADU4_MEMBASE:
13844 return OP_LOADU4_MEM;
/* 64 bit loads only have an absolute form when registers are 64 bit wide */
13845 #if SIZEOF_REGISTER == 8
13846 case OP_LOADI8_MEMBASE:
13847 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination vreg is being spilled through
 * STORE_OPCODE, return the x86/amd64 read-modify-write *_MEMBASE variant
 * which operates directly on the memory slot, fusing the load+op+store.
 * Only word-sized stores qualify (see the guard conditions below).
 * NOTE(review): most case labels and the "no mapping" fall-through were
 * elided by extraction; only the guards and return statements are visible.
 */
13856 op_to_op_dest_membase (int store_opcode, int opcode)
13858 #if defined(TARGET_X86)
/* Only 32 bit/native-word stores can be fused on x86 */
13859 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13864 return OP_X86_ADD_MEMBASE_REG;
13866 return OP_X86_SUB_MEMBASE_REG;
13868 return OP_X86_AND_MEMBASE_REG;
13870 return OP_X86_OR_MEMBASE_REG;
13872 return OP_X86_XOR_MEMBASE_REG;
13875 return OP_X86_ADD_MEMBASE_IMM;
13878 return OP_X86_SUB_MEMBASE_IMM;
13881 return OP_X86_AND_MEMBASE_IMM;
13884 return OP_X86_OR_MEMBASE_IMM;
13887 return OP_X86_XOR_MEMBASE_IMM;
13893 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64 bit stores to be fused */
13894 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit operations reuse the X86_* opcodes */
13899 return OP_X86_ADD_MEMBASE_REG;
13901 return OP_X86_SUB_MEMBASE_REG;
13903 return OP_X86_AND_MEMBASE_REG;
13905 return OP_X86_OR_MEMBASE_REG;
13907 return OP_X86_XOR_MEMBASE_REG;
13909 return OP_X86_ADD_MEMBASE_IMM;
13911 return OP_X86_SUB_MEMBASE_IMM;
13913 return OP_X86_AND_MEMBASE_IMM;
13915 return OP_X86_OR_MEMBASE_IMM;
13917 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit operations use the AMD64_* opcodes */
13919 return OP_AMD64_ADD_MEMBASE_REG;
13921 return OP_AMD64_SUB_MEMBASE_REG;
13923 return OP_AMD64_AND_MEMBASE_REG;
13925 return OP_AMD64_OR_MEMBASE_REG;
13927 return OP_AMD64_XOR_MEMBASE_REG;
13930 return OP_AMD64_ADD_MEMBASE_IMM;
13933 return OP_AMD64_SUB_MEMBASE_IMM;
13936 return OP_AMD64_AND_MEMBASE_IMM;
13939 return OP_AMD64_OR_MEMBASE_IMM;
13942 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode with the byte store that spills its result:
 * on x86/amd64, a compare result stored through OP_STOREI1_MEMBASE_REG
 * becomes a single SET{EQ,NE}_MEMBASE. The case labels and the
 * "no mapping" fall-through were elided by extraction.
 */
13952 op_to_op_store_membase (int store_opcode, int opcode)
13954 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13957 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13958 return OP_X86_SETEQ_MEMBASE;
13960 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13961 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose FIRST source vreg is filled from memory via
 * LOAD_OPCODE, return the x86/amd64 *_MEMBASE variant which reads the
 * operand directly from its stack slot, eliminating the reload.
 * Returns the mapped opcode, or falls through to "no mapping" (the
 * fall-through lines were elided by extraction).
 */
13969 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13972 /* FIXME: This has sign extension issues */
/* Byte-sized compare-with-immediate: the memory operand is unsigned */
13974 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13975 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only native-word/32 bit loads can be folded on x86 */
13978 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13983 return OP_X86_PUSH_MEMBASE;
13984 case OP_COMPARE_IMM:
13985 case OP_ICOMPARE_IMM:
13986 return OP_X86_COMPARE_MEMBASE_IMM;
13989 return OP_X86_COMPARE_MEMBASE_REG;
13993 #ifdef TARGET_AMD64
13994 /* FIXME: This has sign extension issues */
13996 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13997 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) a native-word load is only 32 bits wide */
14002 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14003 return OP_X86_PUSH_MEMBASE;
/* Disabled: the MEMBASE compare-immediate forms only accept 32 bit
 * immediates, so the IMM cases below are commented out upstream. */
14005 /* FIXME: This only works for 32 bit immediates
14006 case OP_COMPARE_IMM:
14007 case OP_LCOMPARE_IMM:
14008 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14009 return OP_AMD64_COMPARE_MEMBASE_IMM;
14011 case OP_ICOMPARE_IMM:
14012 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14013 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32 vs 64 bit form based on the load width */
14017 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14018 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14019 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14020 return OP_AMD64_COMPARE_MEMBASE_REG;
14023 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14024 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase, but for the SECOND source
 * operand: return the x86/amd64 *_REG_MEMBASE variant which reads its
 * right-hand operand directly from memory. The case labels and the
 * "no mapping" fall-through were elided by extraction.
 */
14033 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only native-word/32 bit loads can be folded */
14036 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14042 return OP_X86_COMPARE_REG_MEMBASE;
14044 return OP_X86_ADD_REG_MEMBASE;
14046 return OP_X86_SUB_REG_MEMBASE;
14048 return OP_X86_AND_REG_MEMBASE;
14050 return OP_X86_OR_REG_MEMBASE;
14052 return OP_X86_XOR_REG_MEMBASE;
14056 #ifdef TARGET_AMD64
/* 32 bit loads (or native-word loads under ILP32) use the 32 bit forms */
14057 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14060 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14062 return OP_X86_ADD_REG_MEMBASE;
14064 return OP_X86_SUB_REG_MEMBASE;
14066 return OP_X86_AND_REG_MEMBASE;
14068 return OP_X86_OR_REG_MEMBASE;
14070 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads use the AMD64_* forms */
14072 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14076 return OP_AMD64_COMPARE_REG_MEMBASE;
14078 return OP_AMD64_ADD_REG_MEMBASE;
14080 return OP_AMD64_SUB_REG_MEMBASE;
14082 return OP_AMD64_AND_REG_MEMBASE;
14084 return OP_AMD64_OR_REG_MEMBASE;
14086 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to produce an immediate form for
 * opcodes the current architecture emulates in software (long shifts on
 * 32 bit registers, mul/div/rem under MONO_ARCH_EMULATE_*) — the elided
 * case labels under each #if presumably return a "no mapping" sentinel;
 * verify against upstream. Everything else defers to mono_op_to_op_imm.
 */
14095 mono_op_to_op_imm_noemul (int opcode)
14098 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14104 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14111 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14116 return mono_op_to_op_imm (opcode);
14121 * mono_handle_global_vregs:
14123 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14127 mono_handle_global_vregs (MonoCompile *cfg)
/* vreg_to_bb [vreg] tracks usage: 0 = unseen, block_num + 1 = seen in one
 * bblock, -1 = seen in more than one bblock */
14129 gint32 *vreg_to_bb;
14130 MonoBasicBlock *bb;
/* NOTE(review): the element size is sizeof (gint32*) (pointer-sized, not
 * sizeof (gint32)) and the "+ 1" binds outside the multiply; this merely
 * over-allocates, but looks unintended — confirm against upstream. */
14133 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14135 #ifdef MONO_ARCH_SIMD_INTRINSICS
14136 if (cfg->uses_simd_intrinsics)
14137 mono_simd_simplify_indirection (cfg);
14140 /* Find local vregs used in more than one bb */
14141 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14142 MonoInst *ins = bb->code;
14143 int block_num = bb->block_num;
14145 if (cfg->verbose_level > 2)
14146 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14149 for (; ins; ins = ins->next) {
14150 const char *spec = INS_INFO (ins->opcode);
14151 int regtype = 0, regindex;
14154 if (G_UNLIKELY (cfg->verbose_level > 2))
14155 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
14157 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of the instruction in turn */
14159 for (regindex = 0; regindex < 4; regindex ++) {
14162 if (regindex == 0) {
14163 regtype = spec [MONO_INST_DEST];
14164 if (regtype == ' ')
14167 } else if (regindex == 1) {
14168 regtype = spec [MONO_INST_SRC1];
14169 if (regtype == ' ')
14172 } else if (regindex == 2) {
14173 regtype = spec [MONO_INST_SRC2];
14174 if (regtype == ' ')
14177 } else if (regindex == 3) {
14178 regtype = spec [MONO_INST_SRC3];
14179 if (regtype == ' ')
14184 #if SIZEOF_REGISTER == 4
14185 /* In the LLVM case, the long opcodes are not decomposed */
14186 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14188 * Since some instructions reference the original long vreg,
14189 * and some reference the two component vregs, it is quite hard
14190 * to determine when it needs to be global. So be conservative.
14192 if (!get_vreg_to_inst (cfg, vreg)) {
14193 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14195 if (cfg->verbose_level > 2)
14196 printf ("LONG VREG R%d made global.\n", vreg);
14200 * Make the component vregs volatile since the optimizations can
14201 * get confused otherwise.
14203 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14204 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14208 g_assert (vreg != -1);
14210 prev_bb = vreg_to_bb [vreg];
14211 if (prev_bb == 0) {
14212 /* 0 is a valid block num */
14213 vreg_to_bb [vreg] = block_num + 1;
14214 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers referenced directly are never made global */
14215 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14218 if (!get_vreg_to_inst (cfg, vreg)) {
14219 if (G_UNLIKELY (cfg->verbose_level > 2))
14220 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for the vreg's regtype */
14224 if (vreg_is_ref (cfg, vreg))
14225 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14227 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14230 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14233 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14236 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14239 g_assert_not_reached ();
14243 /* Flag as having been used in more than one bb */
14244 vreg_to_bb [vreg] = -1;
14250 /* If a variable is used in only one bblock, convert it into a local vreg */
14251 for (i = 0; i < cfg->num_varinfo; i++) {
14252 MonoInst *var = cfg->varinfo [i];
14253 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14255 switch (var->type) {
14261 #if SIZEOF_REGISTER == 8
14264 #if !defined(TARGET_X86)
14265 /* Enabling this screws up the fp stack on x86 */
14268 if (mono_arch_is_soft_float ())
14272 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14276 /* Arguments are implicitly global */
14277 /* Putting R4 vars into registers doesn't work currently */
14278 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14279 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14281 * Make that the variable's liveness interval doesn't contain a call, since
14282 * that would cause the lvreg to be spilled, making the whole optimization
14285 /* This is too slow for JIT compilation */
14287 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14289 int def_index, call_index, ins_index;
14290 gboolean spilled = FALSE;
14295 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14296 const char *spec = INS_INFO (ins->opcode);
14298 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14299 def_index = ins_index;
/* NOTE(review): both halves of this || test SRC1/sreg1; the second
 * clause was presumably meant to test SRC2/sreg2 (copy-paste defect)
 * — verify against upstream before changing. */
14301 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14302 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14303 if (call_index > def_index) {
14309 if (MONO_IS_CALL (ins))
14310 call_index = ins_index;
14320 if (G_UNLIKELY (cfg->verbose_level > 2))
14321 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Retire the variable: the vreg stays purely local to its bblock */
14322 var->flags |= MONO_INST_IS_DEAD;
14323 cfg->vreg_to_inst [var->dreg] = NULL;
14330 * Compress the varinfo and vars tables so the liveness computation is faster and
14331 * takes up less space.
14334 for (i = 0; i < cfg->num_varinfo; ++i) {
14335 MonoInst *var = cfg->varinfo [i];
14336 if (pos < i && cfg->locals_start == i)
14337 cfg->locals_start = pos;
14338 if (!(var->flags & MONO_INST_IS_DEAD)) {
14340 cfg->varinfo [pos] = cfg->varinfo [i];
14341 cfg->varinfo [pos]->inst_c0 = pos;
14342 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14343 cfg->vars [pos].idx = pos;
14344 #if SIZEOF_REGISTER == 4
14345 if (cfg->varinfo [pos]->type == STACK_I8) {
14346 /* Modify the two component vars too */
14349 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14350 var1->inst_c0 = pos;
14351 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14352 var1->inst_c0 = pos;
14359 cfg->num_varinfo = pos;
14360 if (cfg->locals_start > cfg->num_varinfo)
14361 cfg->locals_start = cfg->num_varinfo;
14365 * mono_allocate_gsharedvt_vars:
14367 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14368 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14371 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Zeroed so unmapped vregs read as 0; locals store idx + 1, args store -1 */
14375 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14377 for (i = 0; i < cfg->num_varinfo; ++i) {
14378 MonoInst *ins = cfg->varinfo [i];
14381 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals (i >= locals_start) get a runtime-info slot for their offset */
14382 if (i >= cfg->locals_start) {
14384 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14385 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14386 ins->opcode = OP_GSHAREDVT_LOCAL;
14387 ins->inst_imm = idx;
/* Arguments are marked -1 and addressed via their register offset */
14390 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14391 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14398 * mono_spill_global_vars:
14400 * Generate spill code for variables which are not allocated to registers,
14401 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14402 * code is generated which could be optimized by the local optimization passes.
14405 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14407 MonoBasicBlock *bb;
14409 int orig_next_vreg;
14410 guint32 *vreg_to_lvreg;
14412 guint32 i, lvregs_len;
14413 gboolean dest_has_lvreg = FALSE;
14414 MonoStackType stacktypes [128];
14415 MonoInst **live_range_start, **live_range_end;
14416 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14418 *need_local_opts = FALSE;
14420 memset (spec2, 0, sizeof (spec2));
14422 /* FIXME: Move this function to mini.c */
14423 stacktypes ['i'] = STACK_PTR;
14424 stacktypes ['l'] = STACK_I8;
14425 stacktypes ['f'] = STACK_R8;
14426 #ifdef MONO_ARCH_SIMD_INTRINSICS
14427 stacktypes ['x'] = STACK_VTYPE;
14430 #if SIZEOF_REGISTER == 4
14431 /* Create MonoInsts for longs */
14432 for (i = 0; i < cfg->num_varinfo; i++) {
14433 MonoInst *ins = cfg->varinfo [i];
14435 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14436 switch (ins->type) {
14441 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14444 g_assert (ins->opcode == OP_REGOFFSET);
14446 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14448 tree->opcode = OP_REGOFFSET;
14449 tree->inst_basereg = ins->inst_basereg;
14450 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14452 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14454 tree->opcode = OP_REGOFFSET;
14455 tree->inst_basereg = ins->inst_basereg;
14456 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14466 if (cfg->compute_gc_maps) {
14467 /* registers need liveness info even for !non refs */
14468 for (i = 0; i < cfg->num_varinfo; i++) {
14469 MonoInst *ins = cfg->varinfo [i];
14471 if (ins->opcode == OP_REGVAR)
14472 ins->flags |= MONO_INST_GC_TRACK;
14476 /* FIXME: widening and truncation */
14479 * As an optimization, when a variable allocated to the stack is first loaded into
14480 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14481 * the variable again.
14483 orig_next_vreg = cfg->next_vreg;
14484 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14485 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14489 * These arrays contain the first and last instructions accessing a given
14491 * Since we emit bblocks in the same order we process them here, and we
14492 * don't split live ranges, these will precisely describe the live range of
14493 * the variable, i.e. the instruction range where a valid value can be found
14494 * in the variables location.
14495 * The live range is computed using the liveness info computed by the liveness pass.
14496 * We can't use vmv->range, since that is an abstract live range, and we need
14497 * one which is instruction precise.
14498 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14500 /* FIXME: Only do this if debugging info is requested */
14501 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14502 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14503 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14504 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14506 /* Add spill loads/stores */
14507 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14510 if (cfg->verbose_level > 2)
14511 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14513 /* Clear vreg_to_lvreg array */
14514 for (i = 0; i < lvregs_len; i++)
14515 vreg_to_lvreg [lvregs [i]] = 0;
14519 MONO_BB_FOR_EACH_INS (bb, ins) {
14520 const char *spec = INS_INFO (ins->opcode);
14521 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14522 gboolean store, no_lvreg;
14523 int sregs [MONO_MAX_SRC_REGS];
14525 if (G_UNLIKELY (cfg->verbose_level > 2))
14526 mono_print_ins (ins);
14528 if (ins->opcode == OP_NOP)
14532 * We handle LDADDR here as well, since it can only be decomposed
14533 * when variable addresses are known.
14535 if (ins->opcode == OP_LDADDR) {
14536 MonoInst *var = (MonoInst *)ins->inst_p0;
14538 if (var->opcode == OP_VTARG_ADDR) {
14539 /* Happens on SPARC/S390 where vtypes are passed by reference */
14540 MonoInst *vtaddr = var->inst_left;
14541 if (vtaddr->opcode == OP_REGVAR) {
14542 ins->opcode = OP_MOVE;
14543 ins->sreg1 = vtaddr->dreg;
14545 else if (var->inst_left->opcode == OP_REGOFFSET) {
14546 ins->opcode = OP_LOAD_MEMBASE;
14547 ins->inst_basereg = vtaddr->inst_basereg;
14548 ins->inst_offset = vtaddr->inst_offset;
14551 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14552 /* gsharedvt arg passed by ref */
14553 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14555 ins->opcode = OP_LOAD_MEMBASE;
14556 ins->inst_basereg = var->inst_basereg;
14557 ins->inst_offset = var->inst_offset;
14558 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14559 MonoInst *load, *load2, *load3;
14560 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14561 int reg1, reg2, reg3;
14562 MonoInst *info_var = cfg->gsharedvt_info_var;
14563 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14567 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14570 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14572 g_assert (info_var);
14573 g_assert (locals_var);
14575 /* Mark the instruction used to compute the locals var as used */
14576 cfg->gsharedvt_locals_var_ins = NULL;
14578 /* Load the offset */
14579 if (info_var->opcode == OP_REGOFFSET) {
14580 reg1 = alloc_ireg (cfg);
14581 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14582 } else if (info_var->opcode == OP_REGVAR) {
14584 reg1 = info_var->dreg;
14586 g_assert_not_reached ();
14588 reg2 = alloc_ireg (cfg);
14589 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14590 /* Load the locals area address */
14591 reg3 = alloc_ireg (cfg);
14592 if (locals_var->opcode == OP_REGOFFSET) {
14593 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14594 } else if (locals_var->opcode == OP_REGVAR) {
14595 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14597 g_assert_not_reached ();
14599 /* Compute the address */
14600 ins->opcode = OP_PADD;
14604 mono_bblock_insert_before_ins (bb, ins, load3);
14605 mono_bblock_insert_before_ins (bb, load3, load2);
14607 mono_bblock_insert_before_ins (bb, load2, load);
14609 g_assert (var->opcode == OP_REGOFFSET);
14611 ins->opcode = OP_ADD_IMM;
14612 ins->sreg1 = var->inst_basereg;
14613 ins->inst_imm = var->inst_offset;
14616 *need_local_opts = TRUE;
14617 spec = INS_INFO (ins->opcode);
14620 if (ins->opcode < MONO_CEE_LAST) {
14621 mono_print_ins (ins);
14622 g_assert_not_reached ();
14626 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14630 if (MONO_IS_STORE_MEMBASE (ins)) {
14631 tmp_reg = ins->dreg;
14632 ins->dreg = ins->sreg2;
14633 ins->sreg2 = tmp_reg;
14636 spec2 [MONO_INST_DEST] = ' ';
14637 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14638 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14639 spec2 [MONO_INST_SRC3] = ' ';
14641 } else if (MONO_IS_STORE_MEMINDEX (ins))
14642 g_assert_not_reached ();
14647 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14648 printf ("\t %.3s %d", spec, ins->dreg);
14649 num_sregs = mono_inst_get_src_registers (ins, sregs);
14650 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14651 printf (" %d", sregs [srcindex]);
14658 regtype = spec [MONO_INST_DEST];
14659 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14662 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14663 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14664 MonoInst *store_ins;
14666 MonoInst *def_ins = ins;
14667 int dreg = ins->dreg; /* The original vreg */
14669 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14671 if (var->opcode == OP_REGVAR) {
14672 ins->dreg = var->dreg;
14673 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14675 * Instead of emitting a load+store, use a _membase opcode.
14677 g_assert (var->opcode == OP_REGOFFSET);
14678 if (ins->opcode == OP_MOVE) {
14682 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14683 ins->inst_basereg = var->inst_basereg;
14684 ins->inst_offset = var->inst_offset;
14687 spec = INS_INFO (ins->opcode);
14691 g_assert (var->opcode == OP_REGOFFSET);
14693 prev_dreg = ins->dreg;
14695 /* Invalidate any previous lvreg for this vreg */
14696 vreg_to_lvreg [ins->dreg] = 0;
14700 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14702 store_opcode = OP_STOREI8_MEMBASE_REG;
14705 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14707 #if SIZEOF_REGISTER != 8
14708 if (regtype == 'l') {
14709 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14710 mono_bblock_insert_after_ins (bb, ins, store_ins);
14711 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14712 mono_bblock_insert_after_ins (bb, ins, store_ins);
14713 def_ins = store_ins;
14718 g_assert (store_opcode != OP_STOREV_MEMBASE);
14720 /* Try to fuse the store into the instruction itself */
14721 /* FIXME: Add more instructions */
14722 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14723 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14724 ins->inst_imm = ins->inst_c0;
14725 ins->inst_destbasereg = var->inst_basereg;
14726 ins->inst_offset = var->inst_offset;
14727 spec = INS_INFO (ins->opcode);
14728 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14729 ins->opcode = store_opcode;
14730 ins->inst_destbasereg = var->inst_basereg;
14731 ins->inst_offset = var->inst_offset;
14735 tmp_reg = ins->dreg;
14736 ins->dreg = ins->sreg2;
14737 ins->sreg2 = tmp_reg;
14740 spec2 [MONO_INST_DEST] = ' ';
14741 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14742 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14743 spec2 [MONO_INST_SRC3] = ' ';
14745 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14746 // FIXME: The backends expect the base reg to be in inst_basereg
14747 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14749 ins->inst_basereg = var->inst_basereg;
14750 ins->inst_offset = var->inst_offset;
14751 spec = INS_INFO (ins->opcode);
14753 /* printf ("INS: "); mono_print_ins (ins); */
14754 /* Create a store instruction */
14755 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14757 /* Insert it after the instruction */
14758 mono_bblock_insert_after_ins (bb, ins, store_ins);
14760 def_ins = store_ins;
14763 * We can't assign ins->dreg to var->dreg here, since the
14764 * sregs could use it. So set a flag, and do it after
14767 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14768 dest_has_lvreg = TRUE;
14773 if (def_ins && !live_range_start [dreg]) {
14774 live_range_start [dreg] = def_ins;
14775 live_range_start_bb [dreg] = bb;
14778 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14781 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14782 tmp->inst_c1 = dreg;
14783 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14790 num_sregs = mono_inst_get_src_registers (ins, sregs);
14791 for (srcindex = 0; srcindex < 3; ++srcindex) {
14792 regtype = spec [MONO_INST_SRC1 + srcindex];
14793 sreg = sregs [srcindex];
14795 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14796 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14797 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14798 MonoInst *use_ins = ins;
14799 MonoInst *load_ins;
14800 guint32 load_opcode;
14802 if (var->opcode == OP_REGVAR) {
14803 sregs [srcindex] = var->dreg;
14804 //mono_inst_set_src_registers (ins, sregs);
14805 live_range_end [sreg] = use_ins;
14806 live_range_end_bb [sreg] = bb;
14808 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14811 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14812 /* var->dreg is a hreg */
14813 tmp->inst_c1 = sreg;
14814 mono_bblock_insert_after_ins (bb, ins, tmp);
14820 g_assert (var->opcode == OP_REGOFFSET);
14822 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14824 g_assert (load_opcode != OP_LOADV_MEMBASE);
14826 if (vreg_to_lvreg [sreg]) {
14827 g_assert (vreg_to_lvreg [sreg] != -1);
14829 /* The variable is already loaded to an lvreg */
14830 if (G_UNLIKELY (cfg->verbose_level > 2))
14831 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14832 sregs [srcindex] = vreg_to_lvreg [sreg];
14833 //mono_inst_set_src_registers (ins, sregs);
14837 /* Try to fuse the load into the instruction */
14838 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14839 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14840 sregs [0] = var->inst_basereg;
14841 //mono_inst_set_src_registers (ins, sregs);
14842 ins->inst_offset = var->inst_offset;
14843 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14844 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14845 sregs [1] = var->inst_basereg;
14846 //mono_inst_set_src_registers (ins, sregs);
14847 ins->inst_offset = var->inst_offset;
14849 if (MONO_IS_REAL_MOVE (ins)) {
14850 ins->opcode = OP_NOP;
14853 //printf ("%d ", srcindex); mono_print_ins (ins);
14855 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14857 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14858 if (var->dreg == prev_dreg) {
14860 * sreg refers to the value loaded by the load
14861 * emitted below, but we need to use ins->dreg
14862 * since it refers to the store emitted earlier.
14866 g_assert (sreg != -1);
14867 vreg_to_lvreg [var->dreg] = sreg;
14868 g_assert (lvregs_len < 1024);
14869 lvregs [lvregs_len ++] = var->dreg;
14873 sregs [srcindex] = sreg;
14874 //mono_inst_set_src_registers (ins, sregs);
14876 #if SIZEOF_REGISTER != 8
14877 if (regtype == 'l') {
14878 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14879 mono_bblock_insert_before_ins (bb, ins, load_ins);
14880 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14881 mono_bblock_insert_before_ins (bb, ins, load_ins);
14882 use_ins = load_ins;
14887 #if SIZEOF_REGISTER == 4
14888 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14890 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14891 mono_bblock_insert_before_ins (bb, ins, load_ins);
14892 use_ins = load_ins;
14896 if (var->dreg < orig_next_vreg) {
14897 live_range_end [var->dreg] = use_ins;
14898 live_range_end_bb [var->dreg] = bb;
14901 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14904 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14905 tmp->inst_c1 = var->dreg;
14906 mono_bblock_insert_after_ins (bb, ins, tmp);
14910 mono_inst_set_src_registers (ins, sregs);
14912 if (dest_has_lvreg) {
14913 g_assert (ins->dreg != -1);
14914 vreg_to_lvreg [prev_dreg] = ins->dreg;
14915 g_assert (lvregs_len < 1024);
14916 lvregs [lvregs_len ++] = prev_dreg;
14917 dest_has_lvreg = FALSE;
14921 tmp_reg = ins->dreg;
14922 ins->dreg = ins->sreg2;
14923 ins->sreg2 = tmp_reg;
14926 if (MONO_IS_CALL (ins)) {
14927 /* Clear vreg_to_lvreg array */
14928 for (i = 0; i < lvregs_len; i++)
14929 vreg_to_lvreg [lvregs [i]] = 0;
14931 } else if (ins->opcode == OP_NOP) {
14933 MONO_INST_NULLIFY_SREGS (ins);
14936 if (cfg->verbose_level > 2)
14937 mono_print_ins_index (1, ins);
14940 /* Extend the live range based on the liveness info */
14941 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14942 for (i = 0; i < cfg->num_varinfo; i ++) {
14943 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14945 if (vreg_is_volatile (cfg, vi->vreg))
14946 /* The liveness info is incomplete */
14949 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14950 /* Live from at least the first ins of this bb */
14951 live_range_start [vi->vreg] = bb->code;
14952 live_range_start_bb [vi->vreg] = bb;
14955 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14956 /* Live at least until the last ins of this bb */
14957 live_range_end [vi->vreg] = bb->last_ins;
14958 live_range_end_bb [vi->vreg] = bb;
14965 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14966 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14968 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14969 for (i = 0; i < cfg->num_varinfo; ++i) {
14970 int vreg = MONO_VARINFO (cfg, i)->vreg;
14973 if (live_range_start [vreg]) {
14974 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14976 ins->inst_c1 = vreg;
14977 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14979 if (live_range_end [vreg]) {
14980 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14982 ins->inst_c1 = vreg;
14983 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14984 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14986 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14991 if (cfg->gsharedvt_locals_var_ins) {
14992 /* Nullify if unused */
14993 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14994 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14997 g_free (live_range_start);
14998 g_free (live_range_end);
14999 g_free (live_range_start_bb);
15000 g_free (live_range_end_bb);
15005 * - use 'iadd' instead of 'int_add'
15006 * - handling ovf opcodes: decompose in method_to_ir.
15007 * - unify iregs/fregs
15008 * -> partly done, the missing parts are:
15009 * - a more complete unification would involve unifying the hregs as well, so
15010 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15011 * would no longer map to the machine hregs, so the code generators would need to
15012 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15013 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15014 * fp/non-fp branches speeds it up by about 15%.
15015 * - use sext/zext opcodes instead of shifts
15017 * - get rid of TEMPLOADs if possible and use vregs instead
15018 * - clean up usage of OP_P/OP_ opcodes
15019 * - cleanup usage of DUMMY_USE
15020 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15022 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15023 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15024 * - make sure handle_stack_args () is called before the branch is emitted
15025 * - when the new IR is done, get rid of all unused stuff
15026 * - COMPARE/BEQ as separate instructions or unify them ?
15027 * - keeping them separate allows specialized compare instructions like
15028 * compare_imm, compare_membase
15029 * - most back ends unify fp compare+branch, fp compare+ceq
15030 * - integrate mono_save_args into inline_method
15031 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15032 * - handle long shift opts on 32 bit platforms somehow: they require
15033 * 3 sregs (2 for arg1 and 1 for arg2)
15034 * - make byref a 'normal' type.
15035 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15036 * variable if needed.
15037 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15038 * like inline_method.
15039 * - remove inlining restrictions
15040 * - fix LNEG and enable cfold of INEG
15041 * - generalize x86 optimizations like ldelema as a peephole optimization
15042 * - add store_mem_imm for amd64
15043 * - optimize the loading of the interruption flag in the managed->native wrappers
15044 * - avoid special handling of OP_NOP in passes
15045 * - move code inserting instructions into one function/macro.
15046 * - try a coalescing phase after liveness analysis
15047 * - add float -> vreg conversion + local optimizations on !x86
15048 * - figure out how to handle decomposed branches during optimizations, ie.
15049 * compare+branch, op_jump_table+op_br etc.
15050 * - promote RuntimeXHandles to vregs
15051 * - vtype cleanups:
15052 * - add a NEW_VARLOADA_VREG macro
15053 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15054 * accessing vtype fields.
15055 * - get rid of I8CONST on 64 bit platforms
15056 * - dealing with the increase in code size due to branches created during opcode
15058 * - use extended basic blocks
15059 * - all parts of the JIT
15060 * - handle_global_vregs () && local regalloc
15061 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15062 * - sources of increase in code size:
15065 * - isinst and castclass
15066 * - lvregs not allocated to global registers even if used multiple times
15067 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15069 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15070 * - add all micro optimizations from the old JIT
15071 * - put tree optimizations into the deadce pass
15072 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15073 * specific function.
15074 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15075 * fcompare + branchCC.
15076 * - create a helper function for allocating a stack slot, taking into account
15077 * MONO_CFG_HAS_SPILLUP.
15079 * - merge the ia64 switch changes.
15080 * - optimize mono_regstate2_alloc_int/float.
15081 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15082 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15083 * parts of the tree could be separated by other instructions, killing the tree
15084 * arguments, or stores killing loads etc. Also, should we fold loads into other
15085 * instructions if the result of the load is used multiple times ?
15086 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15087 * - LAST MERGE: 108395.
15088 * - when returning vtypes in registers, generate IR and append it to the end of the
15089 * last bb instead of doing it in the epilog.
15090 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15098 - When to decompose opcodes:
15099 - earlier: this makes some optimizations hard to implement, since the low level IR
15100 no longer contains the necessary information. But it is easier to do.
15101 - later: harder to implement, enables more optimizations.
15102 - Branches inside bblocks:
15103 - created when decomposing complex opcodes.
15104 - branches to another bblock: harmless, but not tracked by the branch
15105 optimizations, so need to branch to a label at the start of the bblock.
15106 - branches to inside the same bblock: very problematic, trips up the local
15107 reg allocator. Can be fixed by splitting the current bblock, but that is a
15108 complex operation, since some local vregs can become global vregs etc.
15109 - Local/global vregs:
15110 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15111 local register allocator.
15112 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15113 structure, created by mono_create_var (). Assigned to hregs or the stack by
15114 the global register allocator.
15115 - When to do optimizations like alu->alu_imm:
15116 - earlier -> saves work later on since the IR will be smaller/simpler
15117 - later -> can work on more instructions
15118 - Handling of valuetypes:
15119 - When a vtype is pushed on the stack, a new temporary is created, an
15120 instruction computing its address (LDADDR) is emitted and pushed on
15121 the stack. Need to optimize cases when the vtype is used immediately as in
15122 argument passing, stloc etc.
15123 - Instead of the to_end stuff in the old JIT, simply call the function handling
15124 the values on the stack before emitting the last instruction of the bb.
15127 #endif /* DISABLE_JIT */