2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whether 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
165 * Instruction metadata
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * NOTE(review): this span is a sampled extraction -- each line carries its
 * original line number and several lines (braces, return types) are missing.
 * Only comments are added here; no original tokens are altered.
 */
/* Public wrappers over the static per-type vreg allocators, so other
 * compilation units can allocate JIT virtual registers. */
212 mono_alloc_ireg (MonoCompile *cfg)
/* Allocate a fresh integer vreg. */
214 return alloc_ireg (cfg);
218 mono_alloc_lreg (MonoCompile *cfg)
/* Allocate a fresh long (64-bit) vreg. */
220 return alloc_lreg (cfg);
224 mono_alloc_freg (MonoCompile *cfg)
/* Allocate a fresh floating-point vreg. */
226 return alloc_freg (cfg);
230 mono_alloc_preg (MonoCompile *cfg)
/* Allocate a fresh pointer-sized vreg. */
232 return alloc_preg (cfg);
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
/* Allocate a destination vreg matching the given eval-stack type. */
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed pointer / plain int)
 * to the newly allocated register. */
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
/* Select the register-move opcode (OP_MOVE/OP_FMOVE/...) used to copy a
 * value of TYPE between vregs.  (Span is sampled; some case labels and
 * return statements are missing from this view.) */
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
/* With cfg->r4fp, R4 values use the dedicated r4 move instead of FMOVE. */
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
330 type = &type->data.generic_class->container_class->byval_arg;
/* Generic type variables only occur under generic sharing; recurse on the
 * underlying type. */
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/* Out-of-line failure helpers used by the FAILURE macros above; marked
 * MONO_NEVER_INLINE to keep the fast paths small. */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
/* Trap into the debugger when the break_on_unverified debug option is set. */
373 if (mini_get_debug_options ()->break_on_unverified)
377 static MONO_NEVER_INLINE void
378 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
/* Record a MethodAccessException on CFG; callers then jump to exception_exit. */
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
384 g_free (method_fname);
385 g_free (cil_method_fname);
388 static MONO_NEVER_INLINE void
389 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
/* Record a FieldAccessException on CFG; mirrors method_access_failure. */
391 char *method_fname = mono_method_full_name (method, TRUE);
392 char *field_fname = mono_field_full_name (field);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
394 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
395 g_free (method_fname);
396 g_free (field_fname);
399 static MONO_NEVER_INLINE void
400 inline_failure (MonoCompile *cfg, const char *msg)
/* Mark the compilation as failed due to inlining; used by INLINE_FAILURE. */
402 if (cfg->verbose_level >= 2)
403 printf ("inline failed: %s\n", msg)
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* NOTE(review): the next line ends with a stray line-continuation backslash
 * inside a plain function -- looks like residue from a macro body; verify
 * against upstream before removing. */
410 if (cfg->verbose_level > 2) \
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
415 static MONO_NEVER_INLINE void
416 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* Record a gsharedvt bail-out; the message is kept in cfg->exception_message. */
418 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
419 if (cfg->verbose_level >= 2)
420 printf ("%s\n", cfg->exception_message);
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instantiations might be verifiable, and some might not be. e.g.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
461 /* Emit conversions so both operands of a binary opcode are of the same type */
463 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
465 MonoInst *arg1 = *arg1_ref;
466 MonoInst *arg2 = *arg2_ref;
469 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
470 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
473 /* Mixing r4/r8 is allowed by the spec */
474 if (arg1->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
478 conv->type = STACK_R8;
482 if (arg2->type == STACK_R4) {
483 int dreg = alloc_freg (cfg);
485 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
486 conv->type = STACK_R8;
492 #if SIZEOF_REGISTER == 8
493 /* FIXME: Need to add many more cases */
494 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
497 int dr = alloc_preg (cfg);
498 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
499 (ins)->sreg2 = widen->dreg;
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
524 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins); \
529 #define ADD_BINCOND(next_block) do { \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, cfg->cbb, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, cfg->cbb, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (cfg->cbb, cmp); \
558 MONO_ADD_INS (cfg->cbb, ins); \
562 * link_bblock: Links two basic blocks
564 * links two basic blocks in the control flow graph, the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * that control flows to after 'from'.
569 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
571 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit printed specially). */
575 if (from->cil_code) {
577 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
579 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
582 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
584 printf ("edge from entry to exit\n");
/* Skip if the forward edge already exists, otherwise grow from->out_bb by
 * one (mempool-allocated, so the old array is simply abandoned). */
589 for (i = 0; i < from->out_count; ++i) {
590 if (to == from->out_bb [i]) {
596 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
597 for (i = 0; i < from->out_count; ++i) {
598 newa [i] = from->out_bb [i];
/* Same for the reverse direction: to->in_bb. */
606 for (i = 0; i < to->in_count; ++i) {
607 if (from == to->in_bb [i]) {
613 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
614 for (i = 0; i < to->in_count; ++i) {
615 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
624 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
626 link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
644 mono_find_block_region (MonoCompile *cfg, int offset)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
/* First pass: filter/handler ranges take priority over protected (try)
 * ranges; the 1-based clause index is encoded in the upper bits. */
650 for (i = 0; i < header->num_clauses; ++i) {
651 clause = &header->clauses [i];
652 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
653 (offset < (clause->handler_offset)))
654 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
656 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
657 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
658 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
659 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
660 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
662 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: plain try ranges. */
665 for (i = 0; i < header->num_clauses; ++i) {
666 clause = &header->clauses [i];
668 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
669 return ((i + 1) << 8) | clause->flags;
676 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
678 MonoMethodHeader *header = cfg->header;
679 MonoExceptionClause *clause;
/* Collect (into a GList) the clauses of kind TYPE whose protected range
 * contains IP but not TARGET, i.e. the handlers that a branch from IP to
 * TARGET leaves and which must therefore be run. */
683 for (i = 0; i < header->num_clauses; ++i) {
684 clause = &header->clauses [i];
685 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
686 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
687 if (clause->flags == type)
688 res = g_list_append (res, clause);
695 mono_create_spvar_for_region (MonoCompile *cfg, int region)
/* Return the cached stack-pointer variable for REGION, creating and
 * caching it in cfg->spvars on first use. */
699 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
703 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
704 /* prevent it from being register allocated */
705 var->flags |= MONO_INST_VOLATILE;
707 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
711 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
/* Lookup only -- returns NULL when no exception var exists for OFFSET. */
713 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
717 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
/* Like the spvar helper above, but for the per-offset exception-object
 * variable cached in cfg->exvars. */
721 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
725 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
726 /* prevent it from being register allocated */
727 var->flags |= MONO_INST_VOLATILE;
729 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
735 * Returns the type used in the eval stack when @type is loaded.
736 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
739 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
743 type = mini_get_underlying_type (type);
744 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
746 inst->type = STACK_MP;
751 switch (type->type) {
753 inst->type = STACK_INV;
761 inst->type = STACK_I4;
766 case MONO_TYPE_FNPTR:
767 inst->type = STACK_PTR;
769 case MONO_TYPE_CLASS:
770 case MONO_TYPE_STRING:
771 case MONO_TYPE_OBJECT:
772 case MONO_TYPE_SZARRAY:
773 case MONO_TYPE_ARRAY:
774 inst->type = STACK_OBJ;
778 inst->type = STACK_I8;
781 inst->type = cfg->r4_stack_type;
784 inst->type = STACK_R8;
786 case MONO_TYPE_VALUETYPE:
/* Enums use their underlying integral type. */
787 if (type->data.klass->enumtype) {
788 type = mono_class_enum_basetype (type->data.klass);
792 inst->type = STACK_VTYPE;
795 case MONO_TYPE_TYPEDBYREF:
796 inst->klass = mono_defaults.typed_reference_class;
797 inst->type = STACK_VTYPE;
799 case MONO_TYPE_GENERICINST:
800 type = &type->data.generic_class->container_class->byval_arg;
/* Generic type variables: gsharedvt types stay vtypes, otherwise recurse
 * on the underlying type. */
804 g_assert (cfg->gshared);
805 if (mini_is_gsharedvt_type (type)) {
806 g_assert (cfg->gsharedvt);
807 inst->type = STACK_VTYPE;
809 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
813 g_error ("unknown type 0x%02x in eval stack type", type->type);
818 * The following tables are used to quickly validate the IL code in type_from_op ().
821 bin_num_table [STACK_MAX] [STACK_MAX] = {
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
827 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
835 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
838 /* reduce the size of this table */
840 bin_int_table [STACK_MAX] [STACK_MAX] = {
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
852 bin_comp_table [STACK_MAX] [STACK_MAX] = {
853 /* Inv i L p F & O vt r4 */
855 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
856 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
857 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
859 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
860 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
861 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
862 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
865 /* reduce the size of this table */
867 shift_table [STACK_MAX] [STACK_MAX] = {
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
879 * Tables to map from the non-specific opcode to the matching
880 * type-specific opcode.
882 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
884 binops_op_map [STACK_MAX] = {
885 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
888 /* handles from CEE_NEG to CEE_CONV_U8 */
890 unops_op_map [STACK_MAX] = {
891 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
894 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
896 ovfops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
900 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
902 ovf2ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
906 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
908 ovf3ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
912 /* handles from CEE_BEQ to CEE_BLT_UN */
914 beqops_op_map [STACK_MAX] = {
915 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
918 /* handles from CEE_CEQ to CEE_CLT_UN */
920 ceqops_op_map [STACK_MAX] = {
921 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
925 * Sets ins->type (the type on the eval stack) according to the
926 * type of the opcode and the arguments to it.
927 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
929 * FIXME: this function sets ins->type unconditionally in some cases, but
930 * it should set it to invalid for some types (a conv.x on an object)
933 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
935 switch (ins->opcode) {
/* Arithmetic binops: result type comes from bin_num_table, then the generic
 * opcode is specialized through binops_op_map. */
942 /* FIXME: check unverifiable args for STACK_MP */
943 ins->type = bin_num_table [src1->type] [src2->type];
944 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_int_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
957 ins->type = shift_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick the L/R/F/I variant from the first operand's type. */
963 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE;
966 else if (src1->type == STACK_R4)
967 ins->opcode = OP_RCOMPARE;
968 else if (src1->type == STACK_R8)
969 ins->opcode = OP_FCOMPARE;
971 ins->opcode = OP_ICOMPARE;
973 case OP_ICOMPARE_IMM:
/* NOTE(review): both table indices use src1->type here (there is no src2
 * operand for the _IMM form); confirm against upstream that this is
 * intended rather than a copy-paste slip. */
974 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
975 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
976 ins->opcode = OP_LCOMPARE_IMM;
988 ins->opcode += beqops_op_map [src1->type];
991 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
998 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
1003 ins->type = neg_table [src1->type];
1004 ins->opcode += unops_op_map [ins->type];
1007 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1008 ins->type = src1->type;
1010 ins->type = STACK_INV;
1011 ins->opcode += unops_op_map [ins->type];
1017 ins->type = STACK_I4;
1018 ins->opcode += unops_op_map [src1->type];
1021 ins->type = STACK_R8;
1022 switch (src1->type) {
1025 ins->opcode = OP_ICONV_TO_R_UN;
1028 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions: ovf2/ovf3 maps select the per-source-type
 * opcode. */
1032 case CEE_CONV_OVF_I1:
1033 case CEE_CONV_OVF_U1:
1034 case CEE_CONV_OVF_I2:
1035 case CEE_CONV_OVF_U2:
1036 case CEE_CONV_OVF_I4:
1037 case CEE_CONV_OVF_U4:
1038 ins->type = STACK_I4;
1039 ins->opcode += ovf3ops_op_map [src1->type];
1041 case CEE_CONV_OVF_I_UN:
1042 case CEE_CONV_OVF_U_UN:
1043 ins->type = STACK_PTR;
1044 ins->opcode += ovf2ops_op_map [src1->type];
1046 case CEE_CONV_OVF_I1_UN:
1047 case CEE_CONV_OVF_I2_UN:
1048 case CEE_CONV_OVF_I4_UN:
1049 case CEE_CONV_OVF_U1_UN:
1050 case CEE_CONV_OVF_U2_UN:
1051 case CEE_CONV_OVF_U4_UN:
1052 ins->type = STACK_I4;
1053 ins->opcode += ovf2ops_op_map [src1->type];
1056 ins->type = STACK_PTR;
1057 switch (src1->type) {
1059 ins->opcode = OP_ICONV_TO_U;
/* Pointer-sized conversion differs between 32- and 64-bit targets. */
1063 #if SIZEOF_VOID_P == 8
1064 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_MOVE;
1070 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_FCONV_TO_U;
1079 ins->type = STACK_I8;
1080 ins->opcode += unops_op_map [src1->type];
1082 case CEE_CONV_OVF_I8:
1083 case CEE_CONV_OVF_U8:
1084 ins->type = STACK_I8;
1085 ins->opcode += ovf3ops_op_map [src1->type];
1087 case CEE_CONV_OVF_U8_UN:
1088 case CEE_CONV_OVF_I8_UN:
1089 ins->type = STACK_I8;
1090 ins->opcode += ovf2ops_op_map [src1->type];
1093 ins->type = cfg->r4_stack_type;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1098 ins->opcode += unops_op_map [src1->type];
1101 ins->type = STACK_R8;
1105 ins->type = STACK_I4;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_CONV_OVF_I:
1110 case CEE_CONV_OVF_U:
1111 ins->type = STACK_PTR;
1112 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats cannot overflow-check, so an R8
 * result is marked invalid. */
1115 case CEE_ADD_OVF_UN:
1117 case CEE_MUL_OVF_UN:
1119 case CEE_SUB_OVF_UN:
1120 ins->type = bin_num_table [src1->type] [src2->type];
1121 ins->opcode += ovfops_op_map [src1->type];
1122 if (ins->type == STACK_R8)
1123 ins->type = STACK_INV;
/* Loads: the eval-stack type is fixed by the load width. */
1125 case OP_LOAD_MEMBASE:
1126 ins->type = STACK_PTR;
1128 case OP_LOADI1_MEMBASE:
1129 case OP_LOADU1_MEMBASE:
1130 case OP_LOADI2_MEMBASE:
1131 case OP_LOADU2_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
1133 case OP_LOADU4_MEMBASE:
1134 ins->type = STACK_PTR;
1136 case OP_LOADI8_MEMBASE:
1137 ins->type = STACK_I8;
1139 case OP_LOADR4_MEMBASE:
1140 ins->type = cfg->r4_stack_type;
1142 case OP_LOADR8_MEMBASE:
1143 ins->type = STACK_R8;
1146 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1150 if (ins->type == STACK_MP)
1151 ins->klass = mono_defaults.object_class;
1156 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1162 param_table [STACK_MAX] [STACK_MAX] = {
1167 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1172 switch (args->type) {
1182 for (i = 0; i < sig->param_count; ++i) {
1183 switch (args [i].type) {
1187 if (!sig->params [i]->byref)
1191 if (sig->params [i]->byref)
1193 switch (sig->params [i]->type) {
1194 case MONO_TYPE_CLASS:
1195 case MONO_TYPE_STRING:
1196 case MONO_TYPE_OBJECT:
1197 case MONO_TYPE_SZARRAY:
1198 case MONO_TYPE_ARRAY:
1205 if (sig->params [i]->byref)
1207 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1216 /*if (!param_table [args [i].type] [sig->params [i]->type])
1224 * When we need a pointer to the current domain many times in a method, we
1225 * call mono_domain_get() once and we store the result in a local variable.
1226 * This function returns the variable that represents the MonoDomain*.
1228 inline static MonoInst *
1229 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create cfg->domainvar on first request. */
1231 if (!cfg->domainvar)
1232 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1233 return cfg->domainvar;
1237 * The got_var contains the address of the Global Offset Table when AOT
1241 mono_get_got_var (MonoCompile *cfg)
/* Only created for AOT compilation on backends that need an explicit GOT var. */
1243 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1245 if (!cfg->got_var) {
1246 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1248 return cfg->got_var;
1252 mono_get_vtable_var (MonoCompile *cfg)
/* Lazily create the rgctx variable; only valid for gshared methods. */
1254 g_assert (cfg->gshared);
1256 if (!cfg->rgctx_var) {
1257 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1258 /* force the var to be stack allocated */
1259 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1262 return cfg->rgctx_var;
1266 type_from_stack_type (MonoInst *ins) {
/* Map an eval-stack type back to a representative MonoType; uses
 * ins->klass for the MP/VTYPE cases. */
1267 switch (ins->type) {
1268 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1269 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1270 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1271 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1272 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1274 return &ins->klass->this_arg;
1275 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1276 case STACK_VTYPE: return &ins->klass->byval_arg;
1278 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType* to the evaluation-stack type (STACK_*) it occupies.
 * Several case labels and return statements fall on elided lines; the
 * visible structure mirrors type_from_stack_type () in reverse.
 */
1283 static G_GNUC_UNUSED int
1284 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. before switching on the fundamental type. */
1286 t = mono_type_get_underlying_type (t);
1298 case MONO_TYPE_FNPTR:
1300 case MONO_TYPE_CLASS:
1301 case MONO_TYPE_STRING:
1302 case MONO_TYPE_OBJECT:
1303 case MONO_TYPE_SZARRAY:
1304 case MONO_TYPE_ARRAY:
/* R4 stack type depends on the backend (STACK_R4 vs STACK_R8). */
1310 return cfg->r4_stack_type;
1313 case MONO_TYPE_VALUETYPE:
1314 case MONO_TYPE_TYPEDBYREF:
1316 case MONO_TYPE_GENERICINST:
1317 if (mono_type_generic_inst_is_valuetype (t))
1323 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element MonoClass accessed by a CIL ldelem/stelem opcode.
 * Most case labels are on elided lines; only the LDELEM_REF/STELEM_REF
 * pair is visible. Aborts on an unknown opcode.
 */
1330 array_access_to_klass (int opcode)
1334 return mono_defaults.byte_class;
1336 return mono_defaults.uint16_class;
1339 return mono_defaults.int_class;
1342 return mono_defaults.sbyte_class;
1345 return mono_defaults.int16_class;
1348 return mono_defaults.int32_class;
1350 return mono_defaults.uint32_class;
1353 return mono_defaults.int64_class;
1356 return mono_defaults.single_class;
1359 return mono_defaults.double_class;
1360 case CEE_LDELEM_REF:
1361 case CEE_STELEM_REF:
1362 return mono_defaults.object_class;
1364 g_assert_not_reached ();
1370 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable for stack SLOT holding a value of INS's stack
 * type, reusing a previously created var for the same (slot, type) pair
 * when one exists in cfg->intvars.
 */
1373 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1378 /* inlining can result in deeper stacks */
1379 if (slot >= cfg->header->max_stack)
1380 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Key into the (slot, stack-type) cache; stack types start at 1. */
1382 pos = ins->type - 1 + slot * STACK_MAX;
1384 switch (ins->type) {
/* Cache hit: reuse the previously allocated variable. */
1391 if ((vnum = cfg->intvars [pos]))
1392 return cfg->varinfo [vnum];
1393 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1394 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types (elided case labels) always get a fresh var. */
1397 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Record an image+token pair under KEY in cfg->token_info_hash so the AOT
 * compiler can later resolve KEY back to a metadata token.
 */
1403 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1406 * Don't use this if a generic_context is set, since that means AOT can't
1407 * look up the method using just the image+token.
1408 * table == 0 means this is a reference made from a wrapper.
1410 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1411 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1412 jump_info_token->image = image;
1413 jump_info_token->token = token;
1414 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1419 * This function is called to handle items that are left on the evaluation stack
1420 * at basic block boundaries. What happens is that we save the values to local variables
1421 * and we reload them later when first entering the target basic block (with the
1422 * handle_loaded_temps () function).
1423 * A single joint point will use the same variables (stored in the array bb->out_stack or
1424 * bb->in_stack, if the basic block is before or after the joint point).
1426 * This function needs to be called _before_ emitting the last instruction of
1427 * the bb (i.e. before emitting a branch).
1428 * If the stack merge fails at a join point, cfg->unverifiable is set.
1431 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1434 MonoBasicBlock *bb = cfg->cbb;
1435 MonoBasicBlock *outb;
1436 MonoInst *inst, **locals;
1441 if (cfg->verbose_level > 3)
1442 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bblock exits with values on the stack: pick the
 * variables that will carry them across the block boundary. */
1443 if (!bb->out_scount) {
1444 bb->out_scount = count;
1445 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing an existing in_stack of a successor block, so both
 * sides of the join agree on the variables. */
1447 for (i = 0; i < bb->out_count; ++i) {
1448 outb = bb->out_bb [i];
1449 /* exception handlers are linked, but they should not be considered for stack args */
1450 if (outb->flags & BB_EXCEPTION_HANDLER)
1452 //printf (" %d", outb->block_num);
1453 if (outb->in_stack) {
1455 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh out_stack vars. */
1461 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1462 for (i = 0; i < count; ++i) {
1464 * try to reuse temps already allocated for this purpose, if they occupy the same
1465 * stack slot and if they are of the same type.
1466 * This won't cause conflicts since if 'local' is used to
1467 * store one of the values in the in_stack of a bblock, then
1468 * the same variable will be used for the same outgoing stack
1470 * This doesn't work when inlining methods, since the bblocks
1471 * in the inlined methods do not inherit their in_stack from
1472 * the bblock they are inlined to. See bug #58863 for an
1475 if (cfg->inlined_method)
1476 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1478 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors as their in_stack; a count
 * mismatch at a join point makes the method unverifiable. */
1483 for (i = 0; i < bb->out_count; ++i) {
1484 outb = bb->out_bb [i];
1485 /* exception handlers are linked, but they should not be considered for stack args */
1486 if (outb->flags & BB_EXCEPTION_HANDLER)
1488 if (outb->in_scount) {
1489 if (outb->in_scount != bb->out_scount) {
1490 cfg->unverifiable = TRUE;
1493 continue; /* check they are the same locals */
1495 outb->in_scount = count;
1496 outb->in_stack = bb->out_stack;
1499 locals = bb->out_stack;
/* Spill the current stack values into the chosen temps and make the
 * stack entries refer to those temps from now on. */
1501 for (i = 0; i < count; ++i) {
1502 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1503 inst->cil_code = sp [i]->cil_code;
1504 sp [i] = locals [i];
1505 if (cfg->verbose_level > 3)
1506 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1510 * It is possible that the out bblocks already have in_stack assigned, and
1511 * the in_stacks differ. In this case, we will store to all the different
1518 /* Find a bblock which has a different in_stack */
1520 while (bindex < bb->out_count) {
1521 outb = bb->out_bb [bindex];
1522 /* exception handlers are linked, but they should not be considered for stack args */
1523 if (outb->flags & BB_EXCEPTION_HANDLER) {
1527 if (outb->in_stack != locals) {
/* Store to this successor's distinct in_stack vars as well. */
1528 for (i = 0; i < count; ++i) {
1529 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1530 inst->cil_code = sp [i]->cil_code;
1531 sp [i] = locals [i];
1532 if (cfg->verbose_level > 3)
1533 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1535 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *   Emit an instruction that loads the runtime value described by
 * (PATCH_TYPE, DATA): an AOT constant patch when compiling AOT, otherwise
 * the patch target is resolved immediately and emitted as a pointer const.
 */
1545 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1549 if (cfg->compile_aot) {
1550 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT case: resolve the patch now and embed the resulting pointer. */
1555 ji.type = patch_type;
1556 ji.data.target = data;
1557 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1559 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Two strategies: an icall when the bitmap is compressed, otherwise a direct
 * byte load + bit test (with the iid computed at runtime under AOT).
 */
1565 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1567 int ibitmap_reg = alloc_preg (cfg);
1568 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to a JIT icall. */
1570 MonoInst *res, *ins;
1571 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1572 MONO_ADD_INS (cfg->cbb, ins);
1574 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1575 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1578 int ibitmap_byte_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1582 if (cfg->compile_aot) {
/* AOT: the interface id is only known at runtime, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1583 int iid_reg = alloc_preg (cfg);
1584 int shifted_iid_reg = alloc_preg (cfg);
1585 int ibitmap_byte_address_reg = alloc_preg (cfg);
1586 int masked_iid_reg = alloc_preg (cfg);
1587 int iid_one_bit_reg = alloc_preg (cfg);
1588 int iid_bit_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1591 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1592 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1594 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1595 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1596 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant, fold index and mask now. */
1598 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1605 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1606 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass::interface_bitmap. */
1609 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1611 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1615 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1616 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable::interface_bitmap. */
1619 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1621 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1625 * Emit code which checks whenever the interface id of @klass is smaller than
1626 * than the value given by max_iid_reg.
/*
 * On failure: branch to FALSE_TARGET when given, otherwise (elided else
 * branch — presumably) throw InvalidCastException.
 */
1629 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1630 MonoBasicBlock *false_target)
1632 if (cfg->compile_aot) {
/* AOT: iid is a runtime constant loaded via a patch. */
1633 int iid_reg = alloc_preg (cfg);
1634 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1635 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1642 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1645 /* Same as above, but obtains max_iid from a vtable */
1647 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1648 MonoBasicBlock *false_target)
1650 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable::max_interface_id (16-bit) then do the common check. */
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1653 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1656 /* Same as above, but obtains max_iid from a klass */
1658 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1659 MonoBasicBlock *false_target)
1661 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass::max_interface_id (16-bit) then do the common check. */
1663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1664 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype test: walk the supertypes array of the class
 * in KLASS_REG and compare the entry at KLASS's idepth against KLASS (either
 * a runtime value in KLASS_INS, an AOT class const, or an immediate).
 * Branches to TRUE_TARGET on match; FALSE_TARGET on a too-small idepth.
 */
1668 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1670 int idepth_reg = alloc_preg (cfg);
1671 int stypes_reg = alloc_preg (cfg);
1672 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we use them. */
1674 mono_class_setup_supertypes (klass);
/* The fixed-size supertable only covers MONO_DEFAULT_SUPERTABLE_SIZE
 * entries; deeper hierarchies need an explicit runtime idepth check. */
1676 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* The guard selecting this branch is elided — presumably 'if (klass_ins)'. */
1684 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1685 } else if (cfg->compile_aot) {
1686 int const_reg = alloc_preg (cfg);
1687 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1688 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case where KLASS is known at compile time. */
1696 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1698 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface check against the vtable in VTABLE_REG: first the
 * max-iid range check, then the interface bitmap bit test. On success either
 * branch to TRUE_TARGET or (elided guard — presumably when no target is
 * given) throw InvalidCastException on failure.
 */
1702 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1704 int intf_reg = alloc_preg (cfg);
1706 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1707 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1708 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1712 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1716 * Variant of the above that takes a register to the class, not the vtable.
1719 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1721 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the iid, then test the class's interface bitmap bit. */
1723 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1724 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1729 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact-class equality check: compare KLASS_REG against either the
 * runtime class in KLASS_INST (guard elided — presumably 'if (klass_inst)')
 * or a class constant, throwing InvalidCastException on mismatch.
 */
1733 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1736 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1738 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1741 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact-class check against a compile-time KLASS (no runtime class inst). */
1745 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1747 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare KLASS_REG with KLASS (AOT class const or immediate) and branch
 * to TARGET with the given BRANCH_OP instead of throwing.
 */
1751 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1753 if (cfg->compile_aot) {
1754 int const_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1756 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1764 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass-style check that throws InvalidCastException on
 * failure. The visible code covers two paths: the array path (rank check,
 * element-class check, vector check) and the supertypes path (idepth check
 * plus exact supertable entry check). The guard selecting between them is
 * on an elided line — presumably 'if (klass->rank)'.
 */
1767 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1770 int rank_reg = alloc_preg (cfg);
1771 int eclass_reg = alloc_preg (cfg);
1773 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1774 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1775 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1776 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1777 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1778 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes involving enums/object to match the
 * runtime's array covariance rules for cast_class. */
1779 if (klass->cast_class == mono_defaults.object_class) {
1780 int parent_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1782 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1783 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1784 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1785 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1786 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1787 } else if (klass->cast_class == mono_defaults.enum_class) {
1788 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1789 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1790 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1792 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1793 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1796 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1797 /* Check that the object is a vector too */
1798 int bounds_reg = alloc_preg (cfg);
/* SZARRAYs have a NULL bounds pointer; multi-dim arrays do not. */
1799 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1801 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check (throwing variant). */
1804 int idepth_reg = alloc_preg (cfg);
1805 int stypes_reg = alloc_preg (cfg);
1806 int stype = alloc_preg (cfg);
1808 mono_class_setup_supertypes (klass);
1810 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1813 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1816 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1817 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass wrapper for the common case with no runtime class instruction. */
1822 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1824 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR to set SIZE bytes at DESTREG+OFFSET to VAL (asserted to
 * be 0 in the general path). Uses a single immediate store when SIZE fits a
 * register and ALIGN allows it; otherwise emits a sequence of register
 * stores of decreasing width (loop structure partially elided).
 */
1828 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1832 g_assert (val == 0);
1837 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1840 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1848 #if SIZEOF_REGISTER == 8
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store it out. */
1856 val_reg = alloc_preg (cfg);
1858 if (SIZEOF_REGISTER == 8)
1859 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1861 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until alignment is reached. */
1864 /* This could be optimized further if neccesary */
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1873 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: progressively narrower stores for the remaining bytes. */
1887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1897 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET. Emits load/store pairs of decreasing width, widest
 * first, honoring ALIGN and the backend's unaligned-access capability
 * (loop structure partially elided).
 */
1904 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1911 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1912 g_assert (size < 10000);
/* Unaligned prefix: byte copies until alignment is reached. */
1915 /* This could be optimized further if neccesary */
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte copies when the backend supports them on 64-bit. */
1926 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1928 cur_reg = alloc_preg (cfg);
1929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1938 cur_reg = alloc_preg (cfg);
1939 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1946 cur_reg = alloc_preg (cfg);
1947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1954 cur_reg = alloc_preg (cfg);
1955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY. Under
 * AOT the offset is a patched constant consumed by OP_TLS_SET_REG; under
 * JIT the offset is resolved now and baked into OP_TLS_SET.
 */
1964 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1968 if (cfg->compile_aot) {
1969 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1970 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1972 ins->sreg2 = c->dreg;
1973 MONO_ADD_INS (cfg->cbb, ins);
1975 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1977 ins->inst_offset = mini_get_tls_offset (tls_key);
1978 MONO_ADD_INS (cfg->cbb, ins);
1985 * Emit IR to push the current LMF onto the LMF stack.
1988 emit_push_lmf (MonoCompile *cfg)
1991 * Emit IR to push the LMF:
1992 * lmf_addr = <lmf_addr from tls>
1993 * lmf->lmf_addr = lmf_addr
1994 * lmf->prev_lmf = *lmf_addr
1997 int lmf_reg, prev_lmf_reg;
1998 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in a TLS slot, so linking just means
 * saving the old TLS value into our frame's LMF and storing ours back. */
2003 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2004 /* Load current lmf */
2005 lmf_ins = mono_get_lmf_intrinsic (cfg);
2007 MONO_ADD_INS (cfg->cbb, lmf_ins);
2008 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2009 lmf_reg = ins->dreg;
2010 /* Save previous_lmf */
2011 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2013 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2016 * Store lmf_addr in a variable, so it can be allocated to a global register.
2018 if (!cfg->lmf_addr_var)
2019 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several platform-specific ways to obtain lmf_addr follow; the #ifdef /
 * guard lines selecting between them are elided in this listing. */
2022 ins = mono_get_jit_tls_intrinsic (cfg);
2024 int jit_tls_dreg = ins->dreg;
2026 MONO_ADD_INS (cfg->cbb, ins);
2027 lmf_reg = alloc_preg (cfg);
2028 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2030 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2033 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2035 MONO_ADD_INS (cfg->cbb, lmf_ins);
2038 MonoInst *args [16], *jit_tls_ins, *ins;
2040 /* Inline mono_get_lmf_addr () */
2041 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2043 /* Load mono_jit_tls_id */
2044 if (cfg->compile_aot)
2045 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2047 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2048 /* call pthread_getspecific () */
2049 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2050 /* lmf_addr = &jit_tls->lmf */
2051 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2054 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2058 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link our LMF into the list: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2060 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2061 lmf_reg = ins->dreg;
2063 prev_lmf_reg = alloc_preg (cfg);
2064 /* Save previous_lmf */
2065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2066 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2068 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2075 * Emit IR to pop the current LMF from the LMF stack.
2078 emit_pop_lmf (MonoCompile *cfg)
2080 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2086 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2087 lmf_reg = ins->dreg;
/* TLS fast path: restore the saved previous_lmf directly into TLS. */
2089 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2090 /* Load previous_lmf */
2091 prev_lmf_reg = alloc_preg (cfg);
2092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2094 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2097 * Emit IR to pop the LMF:
2098 * *(lmf->lmf_addr) = lmf->prev_lmf
2100 /* This could be called before emit_push_lmf () */
2101 if (!cfg->lmf_addr_var)
2102 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2103 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Unlink: *lmf_addr = lmf->previous_lmf. */
2105 prev_lmf_reg = alloc_preg (cfg);
2106 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2107 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave icall to FUNC with the current method as
 * argument, when ENTER_LEAVE profiling is enabled and we are not inlining.
 */
2112 emit_instrumentation_call (MonoCompile *cfg, void *func)
2114 MonoInst *iargs [1];
2117 * Avoid instrumenting inlined methods since it can
2118 * distort profiling results.
2120 if (cfg->method != cfg->current_method)
2123 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2124 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2125 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the IR call opcode for a call returning TYPE, choosing between
 * the indirect (CALLI), virtual (MEMBASE) and direct variants. Enum and
 * generic-instance types loop back through the switch after being reduced
 * to their underlying type.
 */
2130 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2133 type = mini_get_underlying_type (type);
2134 switch (type->type) {
2135 case MONO_TYPE_VOID:
2136 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2143 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2147 case MONO_TYPE_FNPTR:
2148 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2149 case MONO_TYPE_CLASS:
2150 case MONO_TYPE_STRING:
2151 case MONO_TYPE_OBJECT:
2152 case MONO_TYPE_SZARRAY:
2153 case MONO_TYPE_ARRAY:
2154 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2160 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2162 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2164 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2165 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying primitive type. */
2166 if (type->data.klass->enumtype) {
2167 type = mono_class_enum_basetype (type->data.klass);
2170 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2171 case MONO_TYPE_TYPEDBYREF:
2172 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2173 case MONO_TYPE_GENERICINST:
2174 type = &type->data.generic_class->container_class->byval_arg;
2177 case MONO_TYPE_MVAR:
/* gsharedvt: type variables return through a vtype call. */
2179 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2187 * target_type_is_incompatible:
2188 * @cfg: MonoCompile context
2190 * Check that the item @arg on the evaluation stack can be stored
2191 * in the target type (can be a local, or field, etc).
2192 * The cfg arg can be used to check if we need verification or just
2195 * Returns: non-0 value if arg can't be stored on a target.
2198 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2200 MonoType *simple_type;
/* Byref targets accept managed pointers (with a class-compat check for
 * gshared) and plain native pointers. */
2203 if (target->byref) {
2204 /* FIXME: check that the pointed to types match */
2205 if (arg->type == STACK_MP) {
2206 MonoClass *base_class = mono_class_from_mono_type (target);
2207 /* This is needed to handle gshared types + ldaddr */
2208 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2209 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2211 if (arg->type == STACK_PTR)
/* By-value target: switch on the underlying fundamental type and
 * compare against the stack type of ARG. */
2216 simple_type = mini_get_underlying_type (target);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2226 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2230 /* STACK_MP is needed when setting pinned locals */
2231 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2236 case MONO_TYPE_FNPTR:
2238 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2239 * in native int. (#688008).
2241 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2244 case MONO_TYPE_CLASS:
2245 case MONO_TYPE_STRING:
2246 case MONO_TYPE_OBJECT:
2247 case MONO_TYPE_SZARRAY:
2248 case MONO_TYPE_ARRAY:
2249 if (arg->type != STACK_OBJ)
2251 /* FIXME: check type compatibility */
2255 if (arg->type != STACK_I8)
2259 if (arg->type != cfg->r4_stack_type)
2263 if (arg->type != STACK_R8)
2266 case MONO_TYPE_VALUETYPE:
2267 if (arg->type != STACK_VTYPE)
2269 klass = mono_class_from_mono_type (simple_type);
2270 if (klass != arg->klass)
2273 case MONO_TYPE_TYPEDBYREF:
2274 if (arg->type != STACK_VTYPE)
2276 klass = mono_class_from_mono_type (simple_type);
2277 if (klass != arg->klass)
2280 case MONO_TYPE_GENERICINST:
2281 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2282 MonoClass *target_class;
2283 if (arg->type != STACK_VTYPE)
2285 klass = mono_class_from_mono_type (simple_type);
2286 target_class = mono_class_from_mono_type (target);
2287 /* The second cases is needed when doing partial sharing */
2288 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2292 if (arg->type != STACK_OBJ)
2294 /* FIXME: check type compatibility */
2298 case MONO_TYPE_MVAR:
/* Unresolved type variables only occur under generic sharing. */
2299 g_assert (cfg->gshared);
2300 if (mini_type_var_is_vt (simple_type)) {
2301 if (arg->type != STACK_VTYPE)
2304 if (arg->type != STACK_OBJ)
2309 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2315 * Prepare arguments for passing to a function call.
2316 * Return a non-zero value if the arguments can't be passed to the given
2318 * The type checks are not yet complete and some conversions may need
2319 * casts on 32 or 64 bit architectures.
2321 * FIXME: implement this using target_type_is_incompatible ()
2324 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2326 MonoType *simple_type;
/* 'this' (when present — guard elided) must be a reference or pointer. */
2330 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2334 for (i = 0; i < sig->param_count; ++i) {
2335 if (sig->params [i]->byref) {
2336 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2340 simple_type = mini_get_underlying_type (sig->params [i]);
/* handle_enum-style loop: enum/genericinst cases jump back here
 * after reducing to the underlying type. */
2342 switch (simple_type->type) {
2343 case MONO_TYPE_VOID:
2352 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2358 case MONO_TYPE_FNPTR:
2359 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2362 case MONO_TYPE_CLASS:
2363 case MONO_TYPE_STRING:
2364 case MONO_TYPE_OBJECT:
2365 case MONO_TYPE_SZARRAY:
2366 case MONO_TYPE_ARRAY:
2367 if (args [i]->type != STACK_OBJ)
2372 if (args [i]->type != STACK_I8)
2376 if (args [i]->type != cfg->r4_stack_type)
2380 if (args [i]->type != STACK_R8)
2383 case MONO_TYPE_VALUETYPE:
2384 if (simple_type->data.klass->enumtype) {
2385 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2388 if (args [i]->type != STACK_VTYPE)
2391 case MONO_TYPE_TYPEDBYREF:
2392 if (args [i]->type != STACK_VTYPE)
2395 case MONO_TYPE_GENERICINST:
2396 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2399 case MONO_TYPE_MVAR:
2401 if (args [i]->type != STACK_VTYPE)
2405 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual call) opcode to the corresponding direct
 * call opcode. The return statements for each case are on elided lines.
 * Aborts on a non-MEMBASE opcode.
 */
2413 callvirt_to_call (int opcode)
2416 case OP_CALL_MEMBASE:
2418 case OP_VOIDCALL_MEMBASE:
2420 case OP_FCALL_MEMBASE:
2422 case OP_RCALL_MEMBASE:
2424 case OP_VCALL_MEMBASE:
2426 case OP_LCALL_MEMBASE:
2429 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Map a *CALL_MEMBASE (virtual call) opcode to the corresponding
 * indirect (*CALL_REG) opcode. Aborts on a non-MEMBASE opcode.
 */
2436 callvirt_to_call_reg (int opcode)
2439 case OP_CALL_MEMBASE:
2441 case OP_VOIDCALL_MEMBASE:
2442 return OP_VOIDCALL_REG;
2443 case OP_FCALL_MEMBASE:
2444 return OP_FCALL_REG;
2445 case OP_RCALL_MEMBASE:
2446 return OP_RCALL_REG;
2447 case OP_VCALL_MEMBASE:
2448 return OP_VCALL_REG;
2449 case OP_LCALL_MEMBASE:
2450 return OP_LCALL_REG;
2452 g_assert_not_reached ();
2458 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either the given IMT_ARG value or a
 * methodconst for METHOD) into a register and attach it to CALL. LLVM
 * records the register in imt_arg_reg; the JIT path pins it to
 * MONO_ARCH_IMT_REG via an out-arg registration.
 */
2460 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2464 if (COMPILE_LLVM (cfg)) {
2466 method_reg = alloc_preg (cfg);
2467 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2469 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2470 method_reg = ins->dreg;
2474 call->imt_arg_reg = method_reg;
2476 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value selection, register pinned below. */
2481 method_reg = alloc_preg (cfg);
2482 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2484 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2485 method_reg = ins->dreg;
2488 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch record from the mempool MP and fill in
 *   its target. The ip/type assignments are not visible in this extraction
 *   but the record is returned for later patch resolution.
 */
2491 static MonoJumpInfo *
2492 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2494 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2498 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Thin wrapper over mono_class_check_context_used; presumably gated on
 *   cfg->gshared in the lines elided from this extraction — TODO confirm.
 */
2504 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2507 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Thin wrapper over mono_method_check_context_used; presumably gated on
 *   cfg->gshared in the lines elided from this extraction — TODO confirm.
 */
2513 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2516 return mono_method_check_context_used (method);
2522 * check_method_sharing:
2524 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are returned through OUT_PASS_VTABLE / OUT_PASS_MRGCTX, either of
 * which may be NULL if the caller does not care.
 */
2527 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2529 gboolean pass_vtable = FALSE;
2530 gboolean pass_mrgctx = FALSE;
/* Static methods of generic classes, and valuetype methods, may need the vtable. */
2532 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2533 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2534 gboolean sharable = FALSE;
2536 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2540 * Pass vtable iff target method might
2541 * be shared, which means that sharing
2542 * is enabled for its class and its
2543 * context is sharable (and it's not a
2546 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) take an mrgctx instead of a vtable. */
2550 if (mini_method_get_context (cmethod) &&
2551 mini_method_get_context (cmethod)->method_inst) {
2552 g_assert (!pass_vtable);
2554 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2557 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2562 if (out_pass_vtable)
2563 *out_pass_vtable = pass_vtable;
2564 if (out_pass_mrgctx)
2565 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 *   CALLI selects indirect calls, VIRTUAL_ virtual dispatch, TAIL emits
 *   OP_TAILCALL, RGCTX marks an rgctx argument, UNBOX_TRAMPOLINE requests
 *   an unbox trampoline for valuetype `this`. Handles vtype returns
 *   (vret_var / OP_OUTARG_VTRETADDR) and the soft-float r4 fallback.
 *   NOTE(review): extraction gaps — several branches and the final
 *   `return call;` are not visible here.
 */
2568 inline static MonoCallInst *
2569 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2570 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2574 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls: notify the profiler of method leave before transferring control. */
2582 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2584 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2586 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2589 call->signature = sig;
2590 call->rgctx_reg = rgctx;
2591 sig_ret = mini_get_underlying_type (sig->ret);
2593 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp and
 * reference it via OP_OUTARG_VTRETADDR (see comment below). */
2596 if (mini_type_is_vtype (sig_ret)) {
2597 call->vret_var = cfg->vret_addr;
2598 //g_assert_not_reached ();
2600 } else if (mini_type_is_vtype (sig_ret)) {
2601 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2604 temp->backend.is_pinvoke = sig->pinvoke;
2607 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2608 * address of return value to increase optimization opportunities.
2609 * Before vtype decomposition, the dreg of the call ins itself represents the
2610 * fact the call modifies the return value. After decomposition, the call will
2611 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2612 * will be transformed into an LDADDR.
2614 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2615 loada->dreg = alloc_preg (cfg);
2616 loada->inst_p0 = temp;
2617 /* We reference the call too since call->dreg could change during optimization */
2618 loada->inst_p1 = call;
2619 MONO_ADD_INS (cfg->cbb, loada);
2621 call->inst.dreg = temp->dreg;
2623 call->vret_var = loada;
2624 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2625 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2627 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2628 if (COMPILE_SOFT_FLOAT (cfg)) {
2630 * If the call has a float argument, we would need to do an r8->r4 conversion using
2631 * an icall, but that cannot be done during the call sequence since it would clobber
2632 * the call registers + the stack. So we do it before emitting the call.
2634 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2636 MonoInst *in = call->args [i];
2638 if (i >= sig->hasthis)
2639 t = sig->params [i - sig->hasthis];
2641 t = &mono_defaults.int_class->byval_arg;
2642 t = mono_type_get_underlying_type (t);
2644 if (!t->byref && t->type == MONO_TYPE_R4) {
2645 MonoInst *iargs [1];
2649 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2651 /* The result will be in an int vreg */
2652 call->args [i] = conv;
2658 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or arch-specific) lower the out-args. */
2661 if (COMPILE_LLVM (cfg))
2662 mono_llvm_emit_call (cfg, call);
2664 mono_arch_emit_call (cfg, call);
2666 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing parameter area needed by any call. */
2669 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2670 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument (in RGCTX_REG) to CALL: pin it to the
 *   architecture rgctx register and mark the cfg/call as using it.
 */
2676 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2678 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2679 cfg->uses_rgctx_reg = TRUE;
2680 call->rgctx_reg = TRUE;
2682 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and args ARGS.
 *   Optionally passes IMT_ARG and RGCTX_ARG. When pinvoke callconv
 *   checking is enabled, saves the stack pointer before the call and
 *   verifies it afterwards, throwing ExecutionEngineException on mismatch.
 */
2686 inline static MonoInst*
2687 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2692 gboolean check_sp = FALSE;
/* Only check the SP for pinvoke wrappers when the feature is enabled. */
2694 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2695 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2697 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value now; the original vreg may be clobbered by arg setup. */
2702 rgctx_reg = mono_alloc_preg (cfg);
2703 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2707 if (!cfg->stack_inbalance_var)
2708 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call for the post-call comparison. */
2710 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2711 ins->dreg = cfg->stack_inbalance_var->dreg;
2712 MONO_ADD_INS (cfg->cbb, ins);
2715 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2717 call->inst.sreg1 = addr->dreg;
2720 emit_imt_argument (cfg, call, NULL, imt_arg);
2722 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call: read SP again and compare with the saved value. */
2727 sp_reg = mono_alloc_preg (cfg);
2729 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2731 MONO_ADD_INS (cfg->cbb, ins);
2733 /* Restore the stack so we don't crash when throwing the exception */
2734 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2735 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2736 MONO_ADD_INS (cfg->cbb, ins);
2738 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2739 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2743 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2745 return (MonoInst*)call;
2749 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2752 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2754 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2757 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2758 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2760 #ifndef DISABLE_REMOTING
2761 gboolean might_be_remote = FALSE;
2763 gboolean virtual_ = this_ins != NULL;
2764 gboolean enable_for_aot = TRUE;
2767 MonoInst *call_target = NULL;
2769 gboolean need_unbox_trampoline;
2772 sig = mono_method_signature (method);
2774 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2775 g_assert_not_reached ();
2778 rgctx_reg = mono_alloc_preg (cfg);
2779 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2782 if (method->string_ctor) {
2783 /* Create the real signature */
2784 /* FIXME: Cache these */
2785 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2786 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2791 context_used = mini_method_check_context_used (cfg, method);
2793 #ifndef DISABLE_REMOTING
2794 might_be_remote = this_ins && sig->hasthis &&
2795 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2796 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2798 if (might_be_remote && context_used) {
2801 g_assert (cfg->gshared);
2803 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2805 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2809 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2810 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2812 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2814 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2816 #ifndef DISABLE_REMOTING
2817 if (might_be_remote)
2818 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2821 call->method = method;
2822 call->inst.flags |= MONO_INST_HAS_METHOD;
2823 call->inst.inst_left = this_ins;
2824 call->tail_call = tail;
2827 int vtable_reg, slot_reg, this_reg;
2830 this_reg = this_ins->dreg;
2832 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2833 MonoInst *dummy_use;
2835 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2837 /* Make a call to delegate->invoke_impl */
2838 call->inst.inst_basereg = this_reg;
2839 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2840 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2842 /* We must emit a dummy use here because the delegate trampoline will
2843 replace the 'this' argument with the delegate target making this activation
2844 no longer a root for the delegate.
2845 This is an issue for delegates that target collectible code such as dynamic
2846 methods of GC'able assemblies.
2848 For a test case look into #667921.
2850 FIXME: a dummy use is not the best way to do it as the local register allocator
2851 will put it on a caller save register and spil it around the call.
2852 Ideally, we would either put it on a callee save register or only do the store part.
2854 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2856 return (MonoInst*)call;
2859 if ((!cfg->compile_aot || enable_for_aot) &&
2860 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2861 (MONO_METHOD_IS_FINAL (method) &&
2862 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2863 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2865 * the method is not virtual, we just need to ensure this is not null
2866 * and then we can call the method directly.
2868 #ifndef DISABLE_REMOTING
2869 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2871 * The check above ensures method is not gshared, this is needed since
2872 * gshared methods can't have wrappers.
2874 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2878 if (!method->string_ctor)
2879 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2881 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2882 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2884 * the method is virtual, but we can statically dispatch since either
2885 * it's class or the method itself are sealed.
2886 * But first we need to ensure it's not a null reference.
2888 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2890 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2891 } else if (call_target) {
2892 vtable_reg = alloc_preg (cfg);
2893 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2895 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2896 call->inst.sreg1 = call_target->dreg;
2897 call->inst.flags &= !MONO_INST_HAS_METHOD;
2899 vtable_reg = alloc_preg (cfg);
2900 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2901 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2902 guint32 imt_slot = mono_method_get_imt_slot (method);
2903 emit_imt_argument (cfg, call, call->method, imt_arg);
2904 slot_reg = vtable_reg;
2905 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2907 slot_reg = vtable_reg;
2908 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2909 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2911 g_assert (mono_method_signature (method)->generic_param_count);
2912 emit_imt_argument (cfg, call, call->method, imt_arg);
2916 call->inst.sreg1 = slot_reg;
2917 call->inst.inst_offset = offset;
2918 call->is_virtual = TRUE;
2922 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2925 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2927 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature
 *   and no imt/rgctx arguments.
 */
2931 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2933 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 *   The fptr assignment is elided from this extraction.
 */
2937 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2944 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2947 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2949 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the JIT icall registered for FUNC and emit a native call to
 *   its wrapper.
 */
2953 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2955 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2959 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2963 * mono_emit_abs_call:
2965 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2967 inline static MonoInst*
2968 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2969 MonoMethodSignature *sig, MonoInst **args)
2971 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2975 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in the abs_patches table so the ABS patch can be resolved
 * back to this jump-info record at code emission time. */
2978 if (cfg->abs_patches == NULL)
2979 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2980 g_hash_table_insert (cfg->abs_patches, ji, ji);
2981 ins = mono_emit_native_call (cfg, ji, sig, args);
2982 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Return a copy of SIG with one extra trailing pointer-sized parameter
 *   (used to pass the rgctx/extra argument on llvm-only calli paths).
 *   NOTE: allocated with g_malloc and never freed (see FIXME).
 */
2986 static MonoMethodSignature*
2987 sig_to_rgctx_sig (MonoMethodSignature *sig)
2989 // FIXME: memory allocation
2990 MonoMethodSignature *res;
2993 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2994 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2995 res->param_count = sig->param_count + 1;
2996 for (i = 0; i < sig->param_count; ++i)
2997 res->params [i] = sig->params [i];
/* The extra arg is typed as IntPtr-by-ref-style `this_arg`. */
2998 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3002 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Rebuild the argument array from ORIG_ARGS, append the value in
 *   ARG_REG as a final extra argument, extend the signature accordingly
 *   (sig_to_rgctx_sig) and emit an indirect call to CALL_TARGET.
 */
3004 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3006 MonoMethodSignature *csig;
3007 MonoInst *args_buf [16];
3009 int i, pindex, tmp_reg;
3011 /* Make a call with an rgctx/extra arg */
/* Use the stack buffer when it fits, otherwise a mempool allocation. */
3012 if (fsig->param_count + 2 < 16)
3015 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3018 args [pindex ++] = orig_args [0];
3019 for (i = 0; i < fsig->param_count; ++i)
3020 args [pindex ++] = orig_args [fsig->hasthis + i];
3021 tmp_reg = alloc_preg (cfg);
3022 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3023 csig = sig_to_rgctx_sig (fsig);
3024 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3027 /* Emit an indirect call to the function descriptor ADDR */
3029 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3031 int addr_reg, arg_reg;
3032 MonoInst *call_target;
3034 g_assert (cfg->llvm_only);
3037 * addr points to a <addr, arg> pair, load both of them, and
3038 * make a call to addr, passing arg as an extra arg.
/* Load the code address (first slot of the descriptor). */
3040 addr_reg = alloc_preg (cfg);
3041 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* Load the hidden argument (second slot). */
3042 arg_reg = alloc_preg (cfg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3045 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be invoked directly (without their managed
 *   wrapper). Disabled under LLVM on some targets, under sdb sequence
 *   points, or when explicitly turned off.
 */
3049 direct_icalls_enabled (MonoCompile *cfg)
3051 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3053 if (cfg->compile_llvm)
3056 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO. When the icall cannot
 *   raise and direct icalls are enabled, inline its wrapper (a C call +
 *   exception check) instead of calling through the wrapper method.
 */
3062 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3065 * Call the jit icall without a wrapper if possible.
3066 * The wrapper is needed for the following reasons:
3067 * - to handle exceptions thrown using mono_raise_exceptions () from the
3068 * icall function. The EH code needs the lmf frame pushed by the
3069 * wrapper to be able to unwind back to managed code.
3070 * - to be able to do stack walks for asynchronously suspended
3071 * threads when debugging.
3073 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and publish the wrapper; barrier orders the store. */
3077 if (!info->wrapper_method) {
3078 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3079 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3081 mono_memory_barrier ();
3085 * Inline the wrapper method, which is basically a call to the C icall, and
3086 * an exception check.
3088 costs = inline_method (cfg, info->wrapper_method, NULL,
3089 args, NULL, cfg->real_offset, TRUE);
3090 g_assert (costs > 0);
3091 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: call through the icall wrapper. */
3095 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result INS. Native (pinvoke)
 *   and LLVM callees may leave the upper bits of sub-register-sized
 *   integers uninitialized, so an explicit widening conversion is emitted.
 */
3100 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3102 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3103 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3107 * Native code might return non register sized integers
3108 * without initializing the upper bits.
/* Select the widening op matching the return type's load width. */
3110 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3111 case OP_LOADI1_MEMBASE:
3112 widen_op = OP_ICONV_TO_I1;
3114 case OP_LOADU1_MEMBASE:
3115 widen_op = OP_ICONV_TO_U1;
3117 case OP_LOADI2_MEMBASE:
3118 widen_op = OP_ICONV_TO_I2;
3120 case OP_LOADU2_MEMBASE:
3121 widen_op = OP_ICONV_TO_U2;
3127 if (widen_op != -1) {
3128 int dreg = alloc_preg (cfg);
3131 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3132 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache) corlib's String.memcpy(3) helper; aborts if the
 *   installed corlib is too old to provide it.
 */
3142 get_memcpy_method (void)
3144 static MonoMethod *memcpy_method = NULL;
3145 if (!memcpy_method) {
3146 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3148 g_error ("Old corlib found. Install a new one");
3150 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively walk KLASS's instance fields, setting one bit per
 *   pointer-sized slot that holds a managed reference, offset by OFFSET
 *   words. Used to drive mono_gc_wbarrier_value_copy_bitmap.
 */
3154 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3156 MonoClassField *field;
3157 gpointer iter = NULL;
3159 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
3162 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
3164 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3165 if (mini_type_is_reference (mono_field_get_type (field))) {
3166 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3167 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* Recurse into embedded valuetypes that themselves contain references. */
3169 MonoClass *field_class = mono_class_from_mono_type (field->type);
3170 if (field_class->has_references)
3171 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers the
 *   arch card-table barrier opcode, falls back to inline card marking,
 *   and finally to a call to the runtime write-barrier method.
 */
3177 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3179 int card_table_shift_bits;
3180 gpointer card_table_mask;
3182 MonoInst *dummy_use;
3183 int nursery_shift_bits;
3184 size_t nursery_size;
3186 if (!cfg->gen_write_barriers)
3189 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3191 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fast path: single arch-supported card table barrier instruction. */
3193 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3196 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3197 wbarrier->sreg1 = ptr->dreg;
3198 wbarrier->sreg2 = value->dreg;
3199 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = table[ptr >> shift]; *card = 1. */
3200 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3201 int offset_reg = alloc_preg (cfg);
3205 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3206 if (card_table_mask)
3207 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3209 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3210 * IMM's larger than 32bits.
3212 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3213 card_reg = ins->dreg;
3215 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3216 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the runtime-provided write barrier method. */
3218 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3219 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3222 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Copy a valuetype of SIZE bytes (dest/src addresses in iargs[0]/[1])
 *   while emitting GC write barriers for reference-holding slots.
 *   Small copies are unrolled word-by-word; larger ones fall back to
 *   mono_gc_wbarrier_value_copy_bitmap with a per-slot reference bitmap.
 */
3226 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3228 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3229 unsigned need_wb = 0;
3234 /*types with references can't have alignment smaller than sizeof(void*) */
3235 if (align < SIZEOF_VOID_P)
3238 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3239 if (size > 32 * SIZEOF_VOID_P)
3242 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3244 /* We don't unroll more than 5 stores to avoid code bloat. */
3245 if (size > 5 * SIZEOF_VOID_P) {
3246 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3247 size += (SIZEOF_VOID_P - 1);
3248 size &= ~(SIZEOF_VOID_P - 1);
3250 EMIT_NEW_ICONST (cfg, iargs [2], size);
3251 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3252 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3256 destreg = iargs [0]->dreg;
3257 srcreg = iargs [1]->dreg;
3260 dest_ptr_reg = alloc_preg (cfg);
3261 tmp_reg = alloc_preg (cfg);
3264 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word copy; each stored word gets a barrier if its bit is set
 * (per the bitmap built above — the bit test is elided in this extraction). */
3266 while (size >= SIZEOF_VOID_P) {
3267 MonoInst *load_inst;
3268 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3269 load_inst->dreg = tmp_reg;
3270 load_inst->inst_basereg = srcreg;
3271 load_inst->inst_offset = offset;
3272 MONO_ADD_INS (cfg->cbb, load_inst);
3274 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3277 emit_write_barrier (cfg, iargs [0], load_inst);
3279 offset += SIZEOF_VOID_P;
3280 size -= SIZEOF_VOID_P;
3283 /*tmp += sizeof (void*)*/
3284 if (size >= SIZEOF_VOID_P) {
3285 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3286 MONO_ADD_INS (cfg->cbb, iargs [0]);
3290 /* Those cannot be references since size < sizeof (void*) */
3292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3293 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3299 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3300 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3306 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3316 * Emit code to copy a valuetype of type @klass whose address is stored in
3317 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When write barriers are needed, routes through mono_value_copy /
 * wb-aware unrolled copy; gsharedvt classes use runtime-provided size
 * and memcpy helpers; small fixed sizes use an inline memcpy.
 */
3320 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3322 MonoInst *iargs [4];
3325 MonoMethod *memcpy_method;
3326 MonoInst *size_ins = NULL;
3327 MonoInst *memcpy_ins = NULL;
3331 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3334 * This check breaks with spilled vars... need to handle it during verification anyway.
3335 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime. */
3338 if (mini_is_gsharedvt_klass (klass)) {
3340 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3341 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3345 n = mono_class_native_size (klass, &align);
3347 n = mono_class_value_size (klass, &align);
3349 /* if native is true there should be no references in the struct */
3350 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3351 /* Avoid barriers when storing to the stack */
3352 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3353 (dest->opcode == OP_LDADDR))) {
3359 context_used = mini_class_check_context_used (cfg, klass);
3361 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3362 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3364 } else if (context_used) {
3365 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3367 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3368 if (!cfg->compile_aot)
3369 mono_class_compute_gc_descriptor (klass);
3373 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3375 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: plain memory copy. */
3380 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3381 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3382 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3387 iargs [2] = size_ins;
3389 EMIT_NEW_ICONST (cfg, iargs [2], n);
3391 memcpy_method = get_memcpy_method ();
3393 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3395 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache) corlib's String.memset(3) helper; aborts if the
 *   installed corlib is too old to provide it.
 */
3400 get_memset_method (void)
3402 static MonoMethod *memset_method = NULL;
3403 if (!memset_method) {
3404 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3406 g_error ("Old corlib found. Install a new one");
3408 return memset_method;
/*
 * mini_emit_initobj:
 *   Zero-initialize a valuetype of type KLASS at the address in DEST.
 *   gsharedvt classes use the runtime bzero helper (size known only at
 *   runtime); small sizes use inline memset; otherwise call memset(3).
 */
3412 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3414 MonoInst *iargs [3];
3417 MonoMethod *memset_method;
3418 MonoInst *size_ins = NULL;
3419 MonoInst *bzero_ins = NULL;
3420 static MonoMethod *bzero_method;
3422 /* FIXME: Optimize this for the case when dest is an LDADDR */
3423 mono_class_init (klass);
3424 if (mini_is_gsharedvt_klass (klass)) {
3425 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3426 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3428 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3429 g_assert (bzero_method);
3431 iargs [1] = size_ins;
3432 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3436 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3438 n = mono_class_value_size (klass, &align);
/* Small structs: inline memset is cheaper than the call. */
3440 if (n <= sizeof (gpointer) * 8) {
3441 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3444 memset_method = get_memset_method ();
3446 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3447 EMIT_NEW_ICONST (cfg, iargs [2], n);
3448 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3455 * Emit IR to return either the this pointer for instance method,
3456 * or the mrgctx for static methods.
/*
 * For instance methods, the vtable is loaded from `this`; for static /
 * valuetype methods, the (m)rgctx comes from the vtable var; for generic
 * methods, the mrgctx variable is loaded directly.
 */
3459 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3461 MonoInst *this_ins = NULL;
3463 g_assert (cfg->gshared);
3465 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3466 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3467 !method->klass->valuetype)
3468 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* Generic method: the context lives in the mrgctx variable. */
3470 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3471 MonoInst *mrgctx_loc, *mrgctx_var;
3473 g_assert (!this_ins);
3474 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3476 mrgctx_loc = mono_get_vtable_var (cfg);
3477 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static or valuetype method: context comes from the vtable variable. */
3480 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3481 MonoInst *vtable_loc, *vtable_var;
3483 g_assert (!this_ins);
3485 vtable_loc = mono_get_vtable_var (cfg);
3486 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3488 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3489 MonoInst *mrgctx_var = vtable_var;
/* The vtable is reachable from the mrgctx's class_vtable field. */
3492 vtable_reg = alloc_preg (cfg);
3493 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3494 vtable_var->type = STACK_PTR;
/* Instance method: load the vtable out of `this`. */
3502 vtable_reg = alloc_preg (cfg);
3503 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry patch record describing a lazy fetch of
 *   INFO_TYPE data (keyed by PATCH_TYPE/PATCH_DATA) for METHOD, from the
 *   mrgctx or the class rgctx depending on IN_MRGCTX.
 */
3508 static MonoJumpInfoRgctxEntry *
3509 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3511 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3512 res->method = method;
3513 res->in_mrgctx = in_mrgctx;
3514 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3515 res->data->type = patch_type;
3516 res->data->data.target = patch_data;
3517 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fastpath that walks the rgctx array chain to load the
 *   slot for ENTRY; if any link or the slot itself is NULL, fall back to
 *   the mono_fill_{method,class}_rgctx icall. Used instead of the lazy
 *   fetch trampoline (e.g. llvm-only).
 */
3522 static inline MonoInst*
3523 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3525 MonoInst *args [16];
3528 // FIXME: No fastpath since the slot is not a compile time constant
3530 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3531 if (entry->in_mrgctx)
3532 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3534 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3538 * FIXME: This can be called during decompose, which is a problem since it creates
3540 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3542 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3544 MonoBasicBlock *is_null_bb, *end_bb;
3545 MonoInst *res, *ins, *call;
3548 slot = mini_get_rgctx_entry_slot (entry);
/* Decode slot into (mrgctx?, depth, index) — which array in the chain
 * and which element within it. */
3550 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3551 index = MONO_RGCTX_SLOT_INDEX (slot);
3553 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3554 for (depth = 0; ; ++depth) {
3555 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3557 if (index < size - 1)
3562 NEW_BBLOCK (cfg, end_bb);
3563 NEW_BBLOCK (cfg, is_null_bb);
3566 rgctx_reg = rgctx->dreg;
3568 rgctx_reg = alloc_preg (cfg);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3571 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3572 NEW_BBLOCK (cfg, is_null_bb);
3574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3575 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH links of the rgctx array chain, null-checking each. */
3578 for (i = 0; i < depth; ++i) {
3579 int array_reg = alloc_preg (cfg);
3581 /* load ptr to next array */
3582 if (mrgctx && i == 0)
3583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3586 rgctx_reg = array_reg;
3587 /* is the ptr null? */
3588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3589 /* if yes, jump to actual trampoline */
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3594 val_reg = alloc_preg (cfg);
3595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3596 /* is the slot null? */
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3598 /* if yes, jump to actual trampoline */
3599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath hit: move the cached value to the shared result reg. */
3602 res_reg = alloc_preg (cfg);
3603 MONO_INST_NEW (cfg, ins, OP_MOVE);
3604 ins->dreg = res_reg;
3605 ins->sreg1 = val_reg;
3606 MONO_ADD_INS (cfg->cbb, ins);
3608 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: fill the slot via the runtime icall. */
3611 MONO_START_BB (cfg, is_null_bb);
3613 EMIT_NEW_ICONST (cfg, args [1], index);
3615 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3617 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3618 MONO_INST_NEW (cfg, ins, OP_MOVE);
3619 ins->dreg = res_reg;
3620 ins->sreg1 = call->dreg;
3621 MONO_ADD_INS (cfg->cbb, ins);
3622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3624 MONO_START_BB (cfg, end_bb);
3633 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3636 static inline MonoInst*
3637 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Either inline the whole fetch sequence ... */
3640 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* ... or go through the lazy rgctx fetch trampoline via an abs call */
3642 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the property RGCTX_TYPE of KLASS from the rgctx of the
 * current method.
 */
3646 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3647 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3649 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3650 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3652 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the property RGCTX_TYPE of the signature SIG from the
 * rgctx of the current method.
 */
3656 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3657 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3659 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3660 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3662 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the property RGCTX_TYPE of the gsharedvt call described
 * by (SIG, CMETHOD) from the rgctx of the current method.  The (sig, method)
 * pair is stored in a mempool-allocated MonoJumpInfoGSharedVtCall.
 */
3666 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3667 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3669 MonoJumpInfoGSharedVtCall *call_info;
3670 MonoJumpInfoRgctxEntry *entry;
3673 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3674 call_info->sig = sig;
3675 call_info->method = cmethod;
3677 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3678 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3680 return emit_rgctx_fetch (cfg, rgctx, entry);
3684 * emit_get_rgctx_virt_method:
3686 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3689 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3690 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3692 MonoJumpInfoVirtMethod *info;
3693 MonoJumpInfoRgctxEntry *entry;
/* Pack (klass, virt_method) into a mempool-allocated descriptor for the patch */
3696 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3697 info->klass = klass;
3698 info->method = virt_method;
3700 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3701 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3703 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MonoGSharedVtMethodRuntimeInfo for CMETHOD
 * (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) from the rgctx of the current
 * method.
 */
3707 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3708 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3710 MonoJumpInfoRgctxEntry *entry;
3713 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3714 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3716 return emit_rgctx_fetch (cfg, rgctx, entry);
3720 * emit_get_rgctx_method:
3722 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3723 * normal constants, else emit a load from the rgctx.
3726 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3727 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at compile time, emit a constant */
3729 if (!context_used) {
3732 switch (rgctx_type) {
3733 case MONO_RGCTX_INFO_METHOD:
3734 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3736 case MONO_RGCTX_INFO_METHOD_RGCTX:
3737 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3740 g_assert_not_reached ();
/* Shared case: fetch the data from the rgctx at runtime */
3743 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3744 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3746 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the property RGCTX_TYPE of FIELD from the rgctx of the
 * current method.
 */
3751 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3752 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3754 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3755 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3757 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template table, registering a new entry (and growing the
 * table geometrically) if it is not already present.  LOCAL_OFFSET entries
 * are never shared since each needs its own slot.
 */
3761 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3763 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3764 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing slot when the same (type, data) pair was already registered */
3769 for (i = 0; i < info->num_entries; ++i) {
3770 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3772 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table: double the capacity, starting at 16 */
3776 if (info->num_entries == info->count_entries) {
3777 MonoRuntimeGenericContextInfoTemplate *new_entries;
3778 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3780 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3782 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3783 info->entries = new_entries;
3784 info->count_entries = new_count_entries;
3787 idx = info->num_entries;
3788 template_ = &info->entries [idx];
3789 template_->info_type = rgctx_type;
3790 template_->data = data;
3792 info->num_entries ++;
3798 * emit_get_gsharedvt_info:
3800 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3803 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3808 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3809 /* Load info->entries [idx] */
3810 dreg = alloc_preg (cfg);
3811 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: load a gsharedvt info entry keyed by KLASS's byval type */
3817 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3819 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3823 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) of KLASS if it has not
 * run yet.  The vtable argument is fetched from the rgctx in shared code,
 * otherwise it is a constant.  When the backend supports it, a single
 * OP_GENERIC_CLASS_INIT opcode is emitted (the call is hidden inside the
 * opcode, so no regs are clobbered and no bblocks are split); otherwise an
 * explicit initialized-bit check + icall fallback is emitted.
 */
3826 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3828 MonoInst *vtable_arg;
3831 context_used = mini_class_check_context_used (cfg, klass);
3834 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3835 klass, MONO_RGCTX_INFO_VTABLE);
3837 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3841 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3844 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3848 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3849 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3851 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3852 ins->sreg1 = vtable_arg->dreg;
3853 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test the 'initialized' bit in the vtable, call the icall if clear.
 * The bitfield offset/mask are computed once and cached in statics. */
3855 static int byte_offset = -1;
3856 static guint8 bitmask;
3857 int bits_reg, inited_reg;
3858 MonoBasicBlock *inited_bb;
3859 MonoInst *args [16];
3861 if (byte_offset < 0)
3862 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3864 bits_reg = alloc_ireg (cfg);
3865 inited_reg = alloc_ireg (cfg);
3867 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3870 NEW_BBLOCK (cfg, inited_bb);
3872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3875 args [0] = vtable_arg;
3876 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3878 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for the debugger, but only when
 * sequence points are enabled and we are compiling METHOD itself (not an
 * inlined copy).  NONEMPTY_STACK marks seq points where the IL stack is not
 * empty.
 */
3883 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3887 if (cfg->gen_seq_points && cfg->method == method) {
3888 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3890 ins->flags |= MONO_INST_NONEMPTY_STACK;
3891 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * destination classes of a cast into the per-thread MonoJitTlsData
 * (class_cast_from/class_cast_to), so a failing cast can produce a detailed
 * message.  With NULL_CHECK, the recording is skipped for null objects.
 */
3896 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3898 if (mini_get_debug_options ()->better_cast_details) {
3899 int vtable_reg = alloc_preg (cfg);
3900 int klass_reg = alloc_preg (cfg);
3901 MonoBasicBlock *is_null_bb = NULL;
3903 int to_klass_reg, context_used;
3906 NEW_BBLOCK (cfg, is_null_bb);
3908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3912 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray '.' after '\n' in this message — looks like a typo in
 * the user-facing string; confirm before changing since it is runtime output. */
3914 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3918 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the 'from' class of the cast */
3919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3920 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3924 context_used = mini_class_check_context_used (cfg, klass);
3926 MonoInst *class_ins;
3928 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3929 to_klass_reg = class_ins->dreg;
3931 to_klass_reg = alloc_preg (cfg);
3932 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3934 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3937 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details recorded by save_cast_details () once the cast
 * has succeeded.
 */
3942 reset_cast_details (MonoCompile *cfg)
3944 /* Reset the variables holding the cast details */
3945 if (mini_get_debug_options ()->better_cast_details) {
3946 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3948 MONO_ADD_INS (cfg->cbb, tls_get);
3949 /* It is enough to reset the from field */
3950 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3955 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ's runtime
 * type is exactly ARRAY_CLASS.  Depending on the compilation mode the check
 * compares either the klass pointer (MONO_OPT_SHARED), an rgctx-fetched
 * vtable (shared generic code), or a constant vtable (AOT / JIT).
 */
3958 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3960 int vtable_reg = alloc_preg (cfg);
3963 context_used = mini_class_check_context_used (cfg, array_class);
3965 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ */
3967 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3969 if (cfg->opt & MONO_OPT_SHARED) {
3970 int class_reg = alloc_preg (cfg);
3973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3974 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3975 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3976 } else if (context_used) {
3977 MonoInst *vtable_ins;
3979 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3980 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3982 if (cfg->compile_aot) {
3986 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3988 vt_reg = alloc_preg (cfg);
3989 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3990 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3993 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3995 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3999 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4001 reset_cast_details (cfg);
4005 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4006 * generic code is generated.
/* Emits a call to Nullable<T>.Unbox; in shared code the callee address is
 * fetched from the rgctx, otherwise a direct call (optionally passing the
 * vtable as the rgctx argument) is emitted. */
4009 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4011 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4014 MonoInst *rgctx, *addr;
4016 /* FIXME: What if the class is shared? We might not
4017 have to get the address of the method from the
4019 addr = emit_get_rgctx_method (cfg, context_used, method,
4020 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4021 if (cfg->llvm_only && cfg->gsharedvt) {
4022 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4024 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4026 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4029 gboolean pass_vtable, pass_mrgctx;
4030 MonoInst *rgctx_arg = NULL;
4032 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4033 g_assert (!pass_mrgctx);
4036 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4039 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4042 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes the object on top of the stack (SP [0]) to a
 * valuetype of class KLASS: checks that the object is not an array
 * (rank == 0) and that its element class matches KLASS, then returns the
 * address of the payload (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
4047 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4051 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4052 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4053 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4054 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4056 obj_reg = sp [0]->dreg;
4057 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4058 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4060 /* FIXME: generics */
4061 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype */
4064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4065 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
4071 MonoInst *element_class;
4073 /* This assertion is from the unboxcast insn */
4074 g_assert (klass->rank == 0);
4076 element_class = emit_get_rgctx_klass (cfg, context_used,
4077 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4079 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4080 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4082 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4083 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4084 reset_cast_details (cfg);
/* Result: address of the boxed payload, right after the MonoObject header */
4087 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4088 MONO_ADD_INS (cfg->cbb, add);
4089 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ to the gsharedvt class KLASS.  Since the concrete
 * type (value type, reference type, or Nullable<T>) is only known at
 * runtime, this branches on the MONO_RGCTX_INFO_CLASS_BOX_TYPE entry of the
 * gsharedvt info and handles each case in its own basic block, loading the
 * final value through ADDR_REG.
 */
4096 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4098 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4099 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4103 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
4109 args [1] = klass_inst;
/* Cast check: throws on mismatch, returns OBJ unchanged on success */
4112 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4114 NEW_BBLOCK (cfg, is_ref_bb);
4115 NEW_BBLOCK (cfg, is_nullable_bb);
4116 NEW_BBLOCK (cfg, end_bb);
4117 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4119 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4124 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4125 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype case: the payload lives right after the MonoObject header */
4129 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4130 MONO_ADD_INS (cfg->cbb, addr);
4132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4135 MONO_START_BB (cfg, is_ref_bb);
4137 /* Save the ref to a temporary */
4138 dreg = alloc_ireg (cfg);
4139 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4140 addr->dreg = addr_reg;
4141 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4145 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time */
4148 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4149 MonoInst *unbox_call;
4150 MonoMethodSignature *unbox_sig;
4152 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4153 unbox_sig->ret = &klass->byval_arg;
4154 unbox_sig->param_count = 1;
4155 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4158 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4160 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4162 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4163 addr->dreg = addr_reg;
4166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4169 MONO_START_BB (cfg, end_bb);
/* Load the final value from the address computed on whichever path was taken */
4172 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4178 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of class KLASS.  Picks between a
 * GC managed allocator, a specialized mscorlib helper (AOT out-of-line
 * paths), and the generic ves_icall_object_new* icalls, depending on
 * sharing mode (MONO_OPT_SHARED / context_used) and whether the instance
 * size is known at compile time (not gsharedvt).  FOR_BOX selects the
 * boxing variant of the managed allocator.
 */
4181 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4183 MonoInst *iargs [2];
4188 MonoRgctxInfoType rgctx_info;
4189 MonoInst *iargs [2];
4190 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4192 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4194 if (cfg->opt & MONO_OPT_SHARED)
4195 rgctx_info = MONO_RGCTX_INFO_KLASS;
4197 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4198 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4200 if (cfg->opt & MONO_OPT_SHARED) {
4201 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4203 alloc_ftn = ves_icall_object_new;
4206 alloc_ftn = ves_icall_object_new_specific;
4209 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4210 if (known_instance_size) {
4211 int size = mono_class_instance_size (klass);
4212 if (size < sizeof (MonoObject))
4213 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4215 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4217 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4220 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-context_used paths below */
4223 if (cfg->opt & MONO_OPT_SHARED) {
4224 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4225 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4227 alloc_ftn = ves_icall_object_new;
4228 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4229 /* This happens often in argument checking code, eg. throw new FooException... */
4230 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4231 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4232 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4234 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4235 MonoMethod *managed_alloc = NULL;
4239 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4240 cfg->exception_ptr = klass;
4244 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4246 if (managed_alloc) {
4247 int size = mono_class_instance_size (klass);
4248 if (size < sizeof (MonoObject))
4249 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4251 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4252 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4253 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4255 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in gpointer-sized words (lw) */
4257 guint32 lw = vtable->klass->instance_size;
4258 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4259 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4260 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4263 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4267 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4271 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of class KLASS.  Nullable<T> goes through the
 * Nullable<T>.Box managed method (via the rgctx in shared code); gsharedvt
 * classes branch at runtime on the CLASS_BOX_TYPE (vtype / ref / nullable);
 * plain valuetypes allocate an object (handle_alloc) and store VAL into its
 * payload.
 */
4274 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4276 MonoInst *alloc, *ins;
4278 if (mono_class_is_nullable (klass)) {
4279 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4282 if (cfg->llvm_only && cfg->gsharedvt) {
4283 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4284 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4285 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4287 /* FIXME: What if the class is shared? We might not
4288 have to get the method address from the RGCTX. */
4289 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4290 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4291 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4293 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4296 gboolean pass_vtable, pass_mrgctx;
4297 MonoInst *rgctx_arg = NULL;
4299 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4300 g_assert (!pass_mrgctx);
4303 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4306 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4309 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4313 if (mini_is_gsharedvt_klass (klass)) {
4314 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4315 MonoInst *res, *is_ref, *src_var, *addr;
4318 dreg = alloc_ireg (cfg);
4320 NEW_BBLOCK (cfg, is_ref_bb);
4321 NEW_BBLOCK (cfg, is_nullable_bb);
4322 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the box type, mirroring handle_unbox_gsharedvt */
4323 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy VAL into the payload */
4331 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4335 ins->opcode = OP_STOREV_MEMBASE;
4337 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4338 res->type = STACK_OBJ;
4340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4343 MONO_START_BB (cfg, is_ref_bb);
4345 /* val is a vtype, so has to load the value manually */
4346 src_var = get_vreg_to_inst (cfg, val->dreg);
4348 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4349 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4354 MONO_START_BB (cfg, is_nullable_bb);
4357 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4358 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4360 MonoMethodSignature *box_sig;
4363 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4364 * construct that method at JIT time, so have to do things by hand.
4366 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4367 box_sig->ret = &mono_defaults.object_class->byval_arg;
4368 box_sig->param_count = 1;
4369 box_sig->params [0] = &klass->byval_arg;
4372 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4374 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4375 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4376 res->type = STACK_OBJ;
4380 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4382 MONO_START_BB (cfg, end_bb);
/* Plain valuetype: allocate, then store VAL right after the object header */
4386 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4390 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or open generic container in
 * shared code) with at least one covariant/contravariant type argument which
 * is a reference type — i.e. casts involving it need the variance-aware
 * (cached) path.
 */
4396 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4399 MonoGenericContainer *container;
4400 MonoGenericInst *ginst;
4402 if (klass->generic_class) {
4403 container = klass->generic_class->container_class->generic_container;
4404 ginst = klass->generic_class->context.class_inst;
4405 } else if (klass->generic_container && context_used) {
4406 container = klass->generic_container;
4407 ginst = container->context.class_inst;
4412 for (i = 0; i < container->type_argc; ++i) {
4414 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4416 type = ginst->type_argv [i];
4417 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls are directly
 * callable; published with a memory barrier and then read without locking. */
4423 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly.  An icall is
 * directly callable if it doesn't directly or indirectly call
 * mono_raise_exception (); only Math and a small whitelist of corlib
 * classes are accepted for now.
 */
4426 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4428 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4429 if (!direct_icalls_enabled (cfg))
4433 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4434 * Whitelist a few icalls for now.
4436 if (!direct_icall_type_hash) {
4437 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4439 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4440 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4441 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4442 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Barrier so readers see a fully-initialized table before the pointer */
4443 mono_memory_barrier ();
4444 direct_icall_type_hash = h;
4447 if (cmethod->klass == mono_defaults.math_class)
4449 /* No locking needed */
4450 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided by a simple
 * class-pointer compare: interfaces, arrays (rank != 0), Nullable<T>,
 * MarshalByRef classes, sealed classes and open type variables all take the
 * cached/wrapper cast paths instead. */
4455 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache wrapper, recording cast details
 * around it for --debug=casts.  ARGS are (obj, klass, cache) as set up by
 * the callers.
 */
4458 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4460 MonoMethod *mono_castclass;
4463 mono_castclass = mono_marshal_get_castclass_with_cache ();
4465 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4466 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4467 reset_cast_details (cfg);
/* Allocate a call-site-unique id for a CASTCLASS_CACHE patch: the method
 * index in the high 16 bits, a per-method counter in the low bits. */
4473 get_castclass_cache_idx (MonoCompile *cfg)
4475 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4476 cfg->castclass_cache_index ++;
4477 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: KLASS is a compile-time constant, and the cache is a
 * runtime constant keyed by a unique call-site index.
 */
4481 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4490 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4493 idx = get_castclass_cache_idx (cfg);
4494 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4496 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4497 return emit_castclass_with_cache (cfg, klass, args);
4501 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: SRC is checked to be an instance of
 * KLASS, throwing InvalidCastException otherwise.  Picks between the
 * cached-cast wrapper (variant generics / complex classes), an inlined
 * marshal wrapper (interfaces / MarshalByRef in non-shared code), and
 * direct vtable/klass compares for simple or sealed classes.  Null objects
 * always pass.
 */
4504 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4506 MonoBasicBlock *is_null_bb;
4507 int obj_reg = src->dreg;
4508 int vtable_reg = alloc_preg (cfg);
4510 MonoInst *klass_inst = NULL, *res;
4512 context_used = mini_class_check_context_used (cfg, klass);
4514 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4515 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4516 (*inline_costs) += 2;
4518 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4519 MonoMethod *mono_castclass;
4520 MonoInst *iargs [1];
4523 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass wrapper body at this call site */
4526 save_cast_details (cfg, klass, src->dreg, TRUE);
4527 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4528 iargs, ip, cfg->real_offset, TRUE);
4529 reset_cast_details (cfg);
4530 CHECK_CFG_EXCEPTION;
4531 g_assert (costs > 0);
4533 cfg->real_offset += 5;
4535 (*inline_costs) += costs;
/* Shared-code path: use the cached-cast wrapper when the check is complex */
4543 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4544 MonoInst *cache_ins;
4546 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4551 /* klass - it's the second element of the cache entry*/
4552 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4555 args [2] = cache_ins;
4557 return emit_castclass_with_cache (cfg, klass, args);
4560 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* null always casts successfully */
4563 NEW_BBLOCK (cfg, is_null_bb);
4565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4568 save_cast_details (cfg, klass, obj_reg, FALSE);
4570 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4572 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4574 int klass_reg = alloc_preg (cfg);
4576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes admit an exact compare instead of a hierarchy walk */
4578 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4579 /* the remoting code is broken, access the class for now */
4580 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4581 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4583 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4584 cfg->exception_ptr = klass;
4587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4592 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4595 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4599 MONO_START_BB (cfg, is_null_bb);
4601 reset_cast_details (cfg);
4610 * Returns NULL and set the cfg exception on error.
4613 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4616 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4617 int obj_reg = src->dreg;
4618 int vtable_reg = alloc_preg (cfg);
4619 int res_reg = alloc_ireg_ref (cfg);
4620 MonoInst *klass_inst = NULL;
4625 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4626 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4627 MonoInst *cache_ins;
4629 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4634 /* klass - it's the second element of the cache entry*/
4635 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4638 args [2] = cache_ins;
4640 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4643 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4646 NEW_BBLOCK (cfg, is_null_bb);
4647 NEW_BBLOCK (cfg, false_bb);
4648 NEW_BBLOCK (cfg, end_bb);
4650 /* Do the assignment at the beginning, so the other assignment can be if converted */
4651 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4652 ins->type = STACK_OBJ;
4655 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4660 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4661 g_assert (!context_used);
4662 /* the is_null_bb target simply copies the input register to the output */
4663 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4665 int klass_reg = alloc_preg (cfg);
4668 int rank_reg = alloc_preg (cfg);
4669 int eclass_reg = alloc_preg (cfg);
4671 g_assert (!context_used);
4672 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4676 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4677 if (klass->cast_class == mono_defaults.object_class) {
4678 int parent_reg = alloc_preg (cfg);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4680 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4681 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4683 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4685 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4687 } else if (klass->cast_class == mono_defaults.enum_class) {
4688 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4690 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4691 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4693 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4694 /* Check that the object is a vector too */
4695 int bounds_reg = alloc_preg (cfg);
4696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4698 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4701 /* the is_null_bb target simply copies the input register to the output */
4702 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4704 } else if (mono_class_is_nullable (klass)) {
4705 g_assert (!context_used);
4706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4707 /* the is_null_bb target simply copies the input register to the output */
4708 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4710 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4711 g_assert (!context_used);
4712 /* the remoting code is broken, access the class for now */
4713 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4714 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4716 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4717 cfg->exception_ptr = klass;
4720 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4729 /* the is_null_bb target simply copies the input register to the output */
4730 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4735 MONO_START_BB (cfg, false_bb);
4737 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4738 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4740 MONO_START_BB (cfg, is_null_bb);
4742 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check (see the result encoding in
 *   the comment below).  Loads result 0/1/2 into an integer register; the
 *   proxy paths are compiled out when DISABLE_REMOTING is defined.
 */
4748 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4750 /* This opcode takes as input an object reference and a class, and returns:
4751 0) if the object is an instance of the class,
4752 1) if the object is not an instance of the class,
4753 2) if the object is a proxy whose type cannot be determined */
4756 #ifndef DISABLE_REMOTING
4757 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4759 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4761 int obj_reg = src->dreg;
4762 int dreg = alloc_ireg (cfg);
4764 #ifndef DISABLE_REMOTING
4765 int klass_reg = alloc_preg (cfg);
4768 NEW_BBLOCK (cfg, true_bb);
4769 NEW_BBLOCK (cfg, false_bb);
4770 NEW_BBLOCK (cfg, end_bb);
4771 #ifndef DISABLE_REMOTING
4772 NEW_BBLOCK (cfg, false2_bb);
4773 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null references take the "not an instance" (result 1) path. */
4776 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4779 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4780 #ifndef DISABLE_REMOTING
4781 NEW_BBLOCK (cfg, interface_fail_bb);
4784 tmp_reg = alloc_preg (cfg);
4785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4786 #ifndef DISABLE_REMOTING
/* Interface check failed: if the object is a transparent proxy without
 * custom type info, the answer cannot be determined (result 2). */
4787 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4788 MONO_START_BB (cfg, interface_fail_bb);
4789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4791 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4793 tmp_reg = alloc_preg (cfg);
4794 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4798 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4801 #ifndef DISABLE_REMOTING
/* Non-interface target: detect transparent proxies and check against the
 * remote class' proxy_class; plain objects go to no_proxy_bb. */
4802 tmp_reg = alloc_preg (cfg);
4803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4806 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4807 tmp_reg = alloc_preg (cfg);
4808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4811 tmp_reg = alloc_preg (cfg);
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4816 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4819 MONO_START_BB (cfg, no_proxy_bb);
4821 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4823 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Merge blocks: load the result value 1 / 2 / 0 and fall through to end_bb. */
4827 MONO_START_BB (cfg, false_bb);
4829 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4830 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4832 #ifndef DISABLE_REMOTING
4833 MONO_START_BB (cfg, false2_bb);
4835 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4839 MONO_START_BB (cfg, true_bb);
4841 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4843 MONO_START_BB (cfg, end_bb);
4846 MONO_INST_NEW (cfg, ins, OP_ICONST);
4848 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check (result encoding in the
 *   comment below).  Throws InvalidCastException through the emitted IR on
 *   cast failure; the proxy paths are compiled out under DISABLE_REMOTING.
 */
4854 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4856 /* This opcode takes as input an object reference and a class, and returns:
4857 0) if the object is an instance of the class,
4858 1) if the object is a proxy whose type cannot be determined
4859 an InvalidCastException exception is thrown otherwise */
4862 #ifndef DISABLE_REMOTING
4863 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4865 MonoBasicBlock *ok_result_bb;
4867 int obj_reg = src->dreg;
4868 int dreg = alloc_ireg (cfg);
4869 int tmp_reg = alloc_preg (cfg);
4871 #ifndef DISABLE_REMOTING
4872 int klass_reg = alloc_preg (cfg);
4873 NEW_BBLOCK (cfg, end_bb);
4876 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4879 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a useful message. */
4881 save_cast_details (cfg, klass, obj_reg, FALSE);
4883 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4884 #ifndef DISABLE_REMOTING
4885 NEW_BBLOCK (cfg, interface_fail_bb);
4887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy without custom type
 * info may survive (result 1); anything else throws. */
4888 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4889 MONO_START_BB (cfg, interface_fail_bb);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4892 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4894 tmp_reg = alloc_preg (cfg);
4895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4897 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4899 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4902 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4903 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4907 #ifndef DISABLE_REMOTING
4908 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface target: handle transparent proxies via the remote class'
 * proxy_class; plain objects fall through to a normal castclass. */
4910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4912 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4914 tmp_reg = alloc_preg (cfg);
4915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4918 tmp_reg = alloc_preg (cfg);
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4923 NEW_BBLOCK (cfg, fail_1_bb);
4925 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4927 MONO_START_BB (cfg, fail_1_bb);
4929 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4932 MONO_START_BB (cfg, no_proxy_bb);
4934 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4936 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4940 MONO_START_BB (cfg, ok_result_bb);
4942 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4944 #ifndef DISABLE_REMOTING
4945 MONO_START_BB (cfg, end_bb);
4949 MONO_INST_NEW (cfg, ins, OP_ICONST);
4951 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag: computes (*enum_this & enum_flag) and
 *   compares it for equality with enum_flag, yielding an I4 boolean.
 *   Uses 32- or 64-bit register ops depending on the enum's underlying type.
 */
4956 static G_GNUC_UNUSED MonoInst*
4957 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4959 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4960 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4963 switch (enum_type->type) {
4966 #if SIZEOF_REGISTER == 8
4978 MonoInst *load, *and_, *cmp, *ceq;
4979 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4980 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4981 int dest_reg = alloc_ireg (cfg);
/* load = *enum_this; and = load & flag; ceq = (and == flag) */
4983 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4984 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4985 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4986 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4988 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4991 load = mono_decompose_opcode (cfg, load);
4992 and_ = mono_decompose_opcode (cfg, and_);
4993 cmp = mono_decompose_opcode (cfg, cmp);
4994 ceq = mono_decompose_opcode (cfg, ceq);
5002 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS and initializes its
 *   target/method/invoke_impl fields inline, mirroring what
 *   mono_delegate_ctor () would do at runtime.  VIRTUAL_ selects the
 *   virtual-delegate path.  Returns the delegate object instruction.
 */
5004 static G_GNUC_UNUSED MonoInst*
5005 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5009 gpointer trampoline;
5010 MonoInst *obj, *method_ins, *tramp_ins;
5014 if (virtual_ && !cfg->llvm_only) {
5015 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5018 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5022 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5026 /* Inline the contents of mono_delegate_ctor */
5028 /* Set target field */
5029 /* Optimize away setting of NULL target */
5030 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5031 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a heap reference, so a write barrier may be
 * required for generational/concurrent GCs. */
5032 if (cfg->gen_write_barriers) {
5033 dreg = alloc_preg (cfg);
5034 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5035 emit_write_barrier (cfg, ptr, target);
5039 /* Set method field */
5040 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5041 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5044 * To avoid looking up the compiled code belonging to the target method
5045 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5046 * store it, and we fill it after the method has been compiled.
5048 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5049 MonoInst *code_slot_ins;
5052 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock. */
5054 domain = mono_domain_get ();
5055 mono_domain_lock (domain);
5056 if (!domain_jit_info (domain)->method_code_hash)
5057 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5058 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5060 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5061 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5063 mono_domain_unlock (domain);
5065 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5067 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode initializes the delegate through an icall instead of a
 * delegate trampoline. */
5070 if (cfg->llvm_only) {
5071 MonoInst *args [16];
5076 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5077 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5080 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT gets a patch-info constant; JIT creates the trampoline now. */
5086 if (cfg->compile_aot) {
5087 MonoDelegateClassMethodPair *del_tramp;
5089 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5090 del_tramp->klass = klass;
5091 del_tramp->method = context_used ? NULL : method;
5092 del_tramp->is_virtual = virtual_;
5093 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5096 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5098 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5099 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5102 /* Set invoke_impl field */
5104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5106 dreg = alloc_preg (cfg);
5107 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5110 dreg = alloc_preg (cfg);
5111 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5112 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5115 dreg = alloc_preg (cfg);
5116 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5117 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5119 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall to allocate a
 *   multi-dimensional array of rank RANK; SP holds the dimension arguments.
 *   Marks the method as having varargs, which also disables LLVM for it.
 */
5125 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5127 MonoJitICallInfo *info;
5129 /* Need to register the icall so it gets an icall wrapper */
5130 info = mono_get_array_new_va_icall (rank);
5132 cfg->flags |= MONO_CFG_HAS_VARARGS;
5134 /* mono_array_new_va () needs a vararg calling convention */
5135 cfg->exception_message = g_strdup ("array-new");
5136 cfg->disable_llvm = TRUE;
5138 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5139 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5143 * handle_constrained_gsharedvt_call:
5145 * Handle constrained calls where the receiver is a gsharedvt type.
5146 * Return the instruction representing the call. Set the cfg exception on failure.
5149 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5150 gboolean *ref_emit_widen)
5152 MonoInst *ins = NULL;
5153 gboolean emit_widen = *ref_emit_widen;
5156 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5157 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5158 * pack the arguments into an array, and do the rest of the work in an icall.
5160 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5161 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5162 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0])))))) {
5163 MonoInst *args [16];
5166 * This case handles calls to
5167 * - object:ToString()/Equals()/GetHashCode(),
5168 * - System.IComparable<T>:CompareTo()
5169 * - System.IEquatable<T>:Equals ()
5170 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, via rgctx when it has an open context. */
5174 if (mono_method_check_context_used (cmethod))
5175 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD)
5177 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5178 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5180 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5181 if (fsig->hasthis && fsig->param_count) {
5182 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5183 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5184 ins->dreg = alloc_preg (cfg);
5185 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5186 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: tell the icall whether it must dereference it. */
5189 if (mini_is_gsharedvt_type (fsig->params [0])) {
5190 int addr_reg, deref_arg_reg;
5192 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5193 deref_arg_reg = alloc_preg (cfg);
5194 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5195 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5197 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5198 addr_reg = ins->dreg;
5199 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5201 EMIT_NEW_ICONST (cfg, args [3], 0);
5202 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5205 EMIT_NEW_ICONST (cfg, args [3], 0);
5206 EMIT_NEW_ICONST (cfg, args [4], 0);
5208 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox primitives and structs so the
 * value matches what a direct call would have produced. */
5211 if (mini_is_gsharedvt_type (fsig->ret)) {
5212 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5213 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5217 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5218 MONO_ADD_INS (cfg->cbb, add);
5220 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5221 MONO_ADD_INS (cfg->cbb, ins);
5222 /* ins represents the call result */
5225 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5228 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 *   basic block, and add a dummy use in bb_exit so the variable stays live.
 *   No-op when there is no got_var or it was already allocated.
 */
5237 mono_emit_load_got_addr (MonoCompile *cfg)
5239 MonoInst *getaddr, *dummy_use;
5241 if (!cfg->got_var || cfg->got_var_allocated)
5244 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5245 getaddr->cil_code = cfg->header->code;
5246 getaddr->dreg = cfg->got_var->dreg;
5248 /* Add it to the start of the first bblock */
5249 if (cfg->bb_entry->code) {
5250 getaddr->next = cfg->bb_entry->code;
5251 cfg->bb_entry->code = getaddr;
5254 MONO_ADD_INS (cfg->bb_entry, getaddr);
5256 cfg->got_var_allocated = TRUE;
5259 * Add a dummy use to keep the got_var alive, since real uses might
5260 * only be generated by the back ends.
5261 * Add it to end_bblock, so the variable's lifetime covers the whole
5263 * It would be better to make the usage of the got var explicit in all
5264 * cases when the backend needs it (i.e. calls, throw etc.), so this
5265 * wouldn't be needed.
5267 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5268 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline code-size threshold, initialized lazily from MONO_INLINELIMIT
 * (default INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
5271 static int inline_limit;
5272 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 *   CFG.  Rejects methods that are explicitly non-inlinable, synchronized,
 *   marshal-by-ref, too large (unless AggressiveInlining), or whose class
 *   cctor cannot be proven to have run / be safe to run.
 */
5275 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5277 MonoMethodHeaderSummary header;
5279 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5280 MonoMethodSignature *sig = mono_method_signature (method);
5284 if (cfg->disable_inline)
5289 if (cfg->inline_depth > 10)
5292 if (!mono_method_get_header_summary (method, &header))
5295 /*runtime, icall and pinvoke are checked by summary call*/
5296 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5297 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5298 (mono_class_is_marshalbyref (method->klass)) ||
5302 /* also consider num_locals? */
5303 /* Do the size check early to avoid creating vtables */
/* NOTE(review): g_getenv () is called twice here; harmless but could
 * cache the result in a local. */
5304 if (!inline_limit_inited) {
5305 if (g_getenv ("MONO_INLINELIMIT"))
5306 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5308 inline_limit = INLINE_LENGTH_LIMIT;
5309 inline_limit_inited = TRUE;
5311 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5315 * if we can initialize the class of the method right away, we do,
5316 * otherwise we don't allow inlining if the class needs initialization,
5317 * since it would mean inserting a call to mono_runtime_class_init()
5318 * inside the inlined code
5320 if (!(cfg->opt & MONO_OPT_SHARED)) {
5321 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5322 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5323 vtable = mono_class_vtable (cfg->domain, method->klass);
5326 if (!cfg->compile_aot)
5327 mono_runtime_class_init (vtable);
5328 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5329 if (cfg->run_cctors && method->klass->has_cctor) {
5330 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5331 if (!method->klass->runtime_info)
5332 /* No vtable created yet */
5334 vtable = mono_class_vtable (cfg->domain, method->klass);
5337 /* This makes so that inline cannot trigger */
5338 /* .cctors: too many apps depend on them */
5339 /* running with a specific order... */
5340 if (! vtable->initialized)
5342 mono_runtime_class_init (vtable);
5344 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5345 if (!method->klass->runtime_info)
5346 /* No vtable created yet */
5348 vtable = mono_class_vtable (cfg->domain, method->klass);
5351 if (!vtable->initialized)
5356 * If we're compiling for shared code
5357 * the cctor will need to be run at aot method load time, for example,
5358 * or at the end of the compilation of the inlining method.
5360 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5364 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5365 if (mono_arch_is_soft_float ()) {
5367 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5369 for (i = 0; i < sig->param_count; ++i)
5370 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5375 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD on KLASS requires the
 *   class constructor to be run before the access.
 */
5382 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5384 if (!cfg->compile_aot) {
5386 if (vtable->initialized)
5390 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5391 if (cfg->method == method)
5395 if (!mono_class_needs_cctor_run (klass, method))
5398 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5399 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 *   array ARR of element type KLASS.  BCHECK controls whether a bounds check
 *   is emitted.  Returns the address instruction (type STACK_MP).
 */
5406 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5410 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5413 if (mini_is_gsharedvt_variable_klass (klass)) {
5416 mono_class_init (klass);
5417 size = mono_class_array_element_size (klass);
5420 mult_reg = alloc_preg (cfg);
5421 array_reg = arr->dreg;
5422 index_reg = index->dreg;
5424 #if SIZEOF_REGISTER == 8
5425 /* The array reg is 64 bits but the index reg is only 32 */
5426 if (COMPILE_LLVM (cfg)) {
5428 index2_reg = index_reg;
5430 index2_reg = alloc_preg (cfg);
5431 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5434 if (index->type == STACK_I8) {
5435 index2_reg = alloc_preg (cfg);
5436 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5438 index2_reg = index_reg;
5443 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* On x86/amd64, power-of-two element sizes can use a single LEA. */
5445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5446 if (size == 1 || size == 2 || size == 4 || size == 8) {
5447 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5449 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5450 ins->klass = mono_class_get_element_class (klass);
5451 ins->type = STACK_MP;
5457 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime, fetch it from
 * the rgctx and multiply dynamically. */
5460 MonoInst *rgctx_ins;
5463 g_assert (cfg->gshared);
5464 context_used = mini_class_check_context_used (cfg, klass);
5465 g_assert (context_used);
5466 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5467 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
5471 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5472 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5473 ins->klass = mono_class_get_element_class (klass);
5474 ins->type = STACK_MP;
5475 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of the two-dimensional
 *   array ARR of element type KLASS, including per-dimension lower-bound
 *   adjustment and range checks.  Returns the address instruction.
 */
5481 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5483 int bounds_reg = alloc_preg (cfg);
5484 int add_reg = alloc_ireg_mp (cfg);
5485 int mult_reg = alloc_preg (cfg);
5486 int mult2_reg = alloc_preg (cfg);
5487 int low1_reg = alloc_preg (cfg);
5488 int low2_reg = alloc_preg (cfg);
5489 int high1_reg = alloc_preg (cfg);
5490 int high2_reg = alloc_preg (cfg);
5491 int realidx1_reg = alloc_preg (cfg);
5492 int realidx2_reg = alloc_preg (cfg);
5493 int sum_reg = alloc_preg (cfg);
5494 int index1, index2, tmpreg;
5498 mono_class_init (klass);
5499 size = mono_class_array_element_size (klass);
5501 index1 = index_ins1->dreg;
5502 index2 = index_ins2->dreg;
5504 #if SIZEOF_REGISTER == 8
5505 /* The array reg is 64 bits but the index reg is only 32 */
5506 if (COMPILE_LLVM (cfg)) {
5509 tmpreg = alloc_preg (cfg);
5510 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5512 tmpreg = alloc_preg (cfg);
5513 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5517 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5521 /* range checking */
5522 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5523 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* realidx = index - lower_bound; unsigned compare against length throws
 * IndexOutOfRangeException for both too-small and too-large indices. */
5525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5526 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5527 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5528 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5529 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5530 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5531 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5534 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5535 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5537 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5539 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + vector offset */
5541 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5542 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5544 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5545 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5547 ins->type = STACK_MP;
5549 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit IR that computes the address of an element of a (possibly
 * multi-dimensional) array for an Address/Get/Set accessor method.
 * NOTE(review): fragmentary listing — the return-type line, opening
 * brace, local declarations and final return are missing here.
 */
5555 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5559 MonoMethod *addr_method;
/* Element class of the array type the accessor belongs to. */
5561 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the trailing argument is the value, not an index. */
5563 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank-1 fast path: single-dimension address computation helper. */
5566 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5568 /* emit_ldelema_2 depends on OP_LMUL */
5569 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5570 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Variable-size (gsharedvt) elements cannot use the inline fast paths. */
5573 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the marshal-generated Address() wrapper. */
5576 element_size = mono_class_array_element_size (eclass);
5577 addr_method = mono_marshal_get_array_address (rank, element_size);
5578 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 * Default break policy: honor every break instruction, regardless of
 * the method it appears in.
 */
5583 static MonoBreakPolicy
5584 always_insert_breakpoint (MonoMethod *method)
5586 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5589 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5592 * mono_set_break_policy:
5593 * policy_callback: the new callback function
5595 * Allow embedders to decide whether to actually obey breakpoint instructions
5596 * (both break IL instructions and Debugger.Break () method calls), for example
5597 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5598 * untrusted or semi-trusted code.
5600 * @policy_callback will be called every time a break point instruction needs to
5601 * be inserted with the method argument being the method that calls Debugger.Break()
5602 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5603 * if it wants the breakpoint to not be effective in the given method.
5604 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy:
 * Install @policy_callback as the break policy; a NULL argument
 * restores the default (always insert breakpoints).
 */
5607 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5609 if (policy_callback)
5610 break_policy_func = policy_callback;
/* NULL callback: fall back to the default always-break policy. */
5612 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 * Ask the installed break policy whether a breakpoint instruction in
 * @method should be honored.
 * NOTE(review): the misspelled name ("brekpoint") matches the upstream
 * identifier; do not rename without updating all callers.
 * NOTE(review): fragmentary listing — the per-case return statements
 * are missing from this excerpt.
 */
5616 should_insert_brekpoint (MonoMethod *method) {
5617 switch (break_policy_func (method)) {
5618 case MONO_BREAK_POLICY_ALWAYS:
5620 case MONO_BREAK_POLICY_NEVER:
5622 case MONO_BREAK_POLICY_ON_DBG:
/* The mdb soft-debugger backend was removed; warn and fall through. */
5623 g_warning ("mdb no longer supported");
/* Embedder returned a value outside the MonoBreakPolicy enum. */
5626 g_warning ("Incorrect value returned from break policy callback");
5631 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * args [0] = array, args [1] = index, args [2] = address of the value.
 * @is_set selects store-into-array vs. load-out-of-array.
 */
5633 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5635 MonoInst *addr, *store, *load;
5636 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5638 /* the bounds check is already done by the callers */
5639 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set path: load the value from args [2] and store it into the array slot. */
5641 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5642 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Reference elements stored into the heap need a GC write barrier. */
5643 if (mini_type_is_reference (fsig->params [2]))
5644 emit_write_barrier (cfg, addr, load);
/* get path: load from the array slot and store into the out address. */
5646 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5647 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether @klass is a reference type for the purposes of array-store checks. */
5654 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5656 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for `array [index] = value` (sp [0]=array, sp [1]=index,
 * sp [2]=value). @safety_checks enables covariance and bounds checks.
 * NOTE(review): fragmentary listing — several interior lines (iargs
 * setup, returns, else branches) are missing from this excerpt.
 */
5660 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference: must go through the virtual stelemref
 * helper so the array-covariance type check is performed. */
5662 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5663 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5664 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5665 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5666 MonoInst *iargs [3];
5669 mono_class_setup_vtable (obj_array);
5670 g_assert (helper->slot);
5672 if (sp [0]->type != STACK_OBJ)
5674 if (sp [2]->type != STACK_OBJ)
5681 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size (gsharedvt) element: compute the address and emit a
 * variable-size value store. */
5685 if (mini_is_gsharedvt_variable_klass (klass)) {
5688 // FIXME-VT: OP_ICONST optimization
5689 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5690 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5691 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset into the store itself. */
5692 } else if (sp [1]->opcode == OP_ICONST) {
5693 int array_reg = sp [0]->dreg;
5694 int index_reg = sp [1]->dreg;
5695 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64-bit: widen the 32-bit index register before addressing. */
5697 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5698 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5701 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5702 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store through it. */
5704 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5705 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
/* Reference element written to the heap: GC write barrier required. */
5706 if (generic_class_is_reference_type (cfg, klass))
5707 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Intrinsic body for Array.UnsafeStore/UnsafeLoad: element access with
 * no covariance check (and, per the FALSE flags below, no bounds check).
 * NOTE(review): fragmentary listing — the if (is_set)/else structure
 * around these lines is missing from this excerpt.
 */
5714 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Store: element type comes from the value parameter. */
5719 eklass = mono_class_from_mono_type (fsig->params [2]);
/* Load: element type comes from the return type. */
5721 eklass = mono_class_from_mono_type (fsig->ret);
5724 return emit_array_store (cfg, eklass, args, FALSE);
5726 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5727 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether Array.UnsafeMov<S,R> can be implemented as a plain
 * register move between @param_klass and @return_klass.
 * NOTE(review): several lines below contain a mojibake '¶' where the
 * original source reads "&param_klass" ("&para" was mis-decoded as the
 * pilcrow entity) — restore '&param_klass' when repairing this file.
 * NOTE(review): "INTRISIC" in the printf strings is an upstream typo
 * for "INTRINSIC"; strings are left untouched here.
 * NOTE(review): fragmentary listing — return TRUE/FALSE statements and
 * closing braces are missing from this excerpt.
 */
5733 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5736 int param_size, return_size;
/* Strip enum wrappers etc. down to the underlying types. */
5738 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5739 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5741 if (cfg->verbose_level > 3)
5742 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5744 //Don't allow mixing reference types with value types
5745 if (param_klass->valuetype != return_klass->valuetype) {
5746 if (cfg->verbose_level > 3)
5747 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* Both are reference types: any object reference fits in one register. */
5751 if (!param_klass->valuetype) {
5752 if (cfg->verbose_level > 3)
5753 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Valuetypes containing GC references cannot be blindly reinterpreted. */
5758 if (param_klass->has_references || return_klass->has_references)
5761 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5762 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5763 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5764 if (cfg->verbose_level > 3)
5765 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in a different register bank; a plain move won't do. */
5769 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5770 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5771 if (cfg->verbose_level > 3)
5772 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5776 param_size = mono_class_value_size (param_klass, &align);
5777 return_size = mono_class_value_size (return_klass, &align);
5779 //We can do it if sizes match
5780 if (param_size == return_size) {
5781 if (cfg->verbose_level > 3)
5782 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5786 //No simple way to handle struct if sizes don't match
5787 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5788 if (cfg->verbose_level > 3)
5789 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5794 * Same reg size category.
5795 * A quick note on why we don't require widening here.
5796 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5798 * Since the source value comes from a function argument, the JIT will already have
5799 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5801 if (param_size <= 4 && return_size <= 4) {
5802 if (cfg->verbose_level > 3)
5803 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Intrinsic body for Array.UnsafeMov<S,R>: reinterpret the argument as
 * the return type when is_unsafe_mov_compatible () allows a plain move.
 * NOTE(review): fragmentary listing — the return statements following
 * each check are missing from this excerpt.
 */
5811 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5813 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5814 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* Variable-size gsharedvt return: cannot express as a register move. */
5816 if (mini_is_gsharedvt_variable_type (fsig->ret))
5819 //Valuetypes that are semantically equivalent or numbers that can be widened to
5820 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5823 //Arrays of valuetypes that are semantically equivalent
5824 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with intrinsic IR (SIMD ctors when
 * MONO_OPT_SIMD is enabled, otherwise native-types intrinsics).
 */
5831 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5833 #ifdef MONO_ARCH_SIMD_INTRINSICS
5834 MonoInst *ins = NULL;
5836 if (cfg->opt & MONO_OPT_SIMD) {
5837 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall through: let the native-types (nint/nfloat) layer try. */
5843 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 * Append an OP_MEMORY_BARRIER instruction of the given @kind
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5847 emit_memory_barrier (MonoCompile *cfg, int kind)
5849 MonoInst *ins = NULL;
5850 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5851 MONO_ADD_INS (cfg->cbb, ins);
5852 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * Intrinsic expansion used when compiling with the LLVM backend:
 * Math.Sin/Cos/Sqrt/Abs become single float ops, and Math.Min/Max
 * become cmov-style min/max ops when MONO_OPT_CMOV is enabled.
 * NOTE(review): fragmentary listing — the opcode assignments for the
 * Sin/Cos/Sqrt/Abs and signed Min/Max branches, plus some braces and
 * returns, are missing from this excerpt.
 */
5858 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5860 MonoInst *ins = NULL;
5863 /* The LLVM backend supports these intrinsics */
5864 if (cmethod->klass == mono_defaults.math_class) {
5865 if (strcmp (cmethod->name, "Sin") == 0) {
5867 } else if (strcmp (cmethod->name, "Cos") == 0) {
5869 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5871 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one freg result fed by the sole argument. */
5875 if (opcode && fsig->param_count == 1) {
5876 MONO_INST_NEW (cfg, ins, opcode);
5877 ins->type = STACK_R8;
5878 ins->dreg = mono_alloc_freg (cfg);
5879 ins->sreg1 = args [0]->dreg;
5880 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to integer min/max opcodes, selected by operand type. */
5884 if (cfg->opt & MONO_OPT_CMOV) {
5885 if (strcmp (cmethod->name, "Min") == 0) {
5886 if (fsig->params [0]->type == MONO_TYPE_I4)
5888 if (fsig->params [0]->type == MONO_TYPE_U4)
5889 opcode = OP_IMIN_UN;
5890 else if (fsig->params [0]->type == MONO_TYPE_I8)
5892 else if (fsig->params [0]->type == MONO_TYPE_U8)
5893 opcode = OP_LMIN_UN;
5894 } else if (strcmp (cmethod->name, "Max") == 0) {
5895 if (fsig->params [0]->type == MONO_TYPE_I4)
5897 if (fsig->params [0]->type == MONO_TYPE_U4)
5898 opcode = OP_IMAX_UN;
5899 else if (fsig->params [0]->type == MONO_TYPE_I8)
5901 else if (fsig->params [0]->type == MONO_TYPE_U8)
5902 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: two int sregs, one ireg result. */
5906 if (opcode && fsig->param_count == 2) {
5907 MONO_INST_NEW (cfg, ins, opcode);
5908 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5909 ins->dreg = mono_alloc_ireg (cfg);
5910 ins->sreg1 = args [0]->dreg;
5911 ins->sreg2 = args [1]->dreg;
5912 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe under generic code sharing: the internal
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5920 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5922 if (cmethod->klass == mono_defaults.array_class) {
5923 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5924 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5925 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5926 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5927 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5928 return emit_array_unsafe_mov (cfg, fsig, args);
5935 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5937 MonoInst *ins = NULL;
5939 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5941 if (cmethod->klass == mono_defaults.string_class) {
5942 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5943 int dreg = alloc_ireg (cfg);
5944 int index_reg = alloc_preg (cfg);
5945 int add_reg = alloc_preg (cfg);
5947 #if SIZEOF_REGISTER == 8
5948 if (COMPILE_LLVM (cfg)) {
5949 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5951 /* The array reg is 64 bits but the index reg is only 32 */
5952 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5955 index_reg = args [1]->dreg;
5957 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5959 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5960 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5961 add_reg = ins->dreg;
5962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5965 int mult_reg = alloc_preg (cfg);
5966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5967 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5968 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5969 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5971 type_from_op (cfg, ins, NULL, NULL);
5973 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5974 int dreg = alloc_ireg (cfg);
5975 /* Decompose later to allow more optimizations */
5976 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5977 ins->type = STACK_I4;
5978 ins->flags |= MONO_INST_FAULT;
5979 cfg->cbb->has_array_access = TRUE;
5980 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5985 } else if (cmethod->klass == mono_defaults.object_class) {
5986 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5987 int dreg = alloc_ireg_ref (cfg);
5988 int vt_reg = alloc_preg (cfg);
5989 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5991 type_from_op (cfg, ins, NULL, NULL);
5994 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5995 int dreg = alloc_ireg (cfg);
5996 int t1 = alloc_ireg (cfg);
5998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5999 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6000 ins->type = STACK_I4;
6003 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6004 MONO_INST_NEW (cfg, ins, OP_NOP);
6005 MONO_ADD_INS (cfg->cbb, ins);
6009 } else if (cmethod->klass == mono_defaults.array_class) {
6010 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6011 return emit_array_generic_access (cfg, fsig, args, FALSE);
6012 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6013 return emit_array_generic_access (cfg, fsig, args, TRUE);
6015 #ifndef MONO_BIG_ARRAYS
6017 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6020 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6021 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6022 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6023 int dreg = alloc_ireg (cfg);
6024 int bounds_reg = alloc_ireg_mp (cfg);
6025 MonoBasicBlock *end_bb, *szarray_bb;
6026 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6028 NEW_BBLOCK (cfg, end_bb);
6029 NEW_BBLOCK (cfg, szarray_bb);
6031 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6032 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6035 /* Non-szarray case */
6037 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6038 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6040 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6041 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6043 MONO_START_BB (cfg, szarray_bb);
6046 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6047 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6049 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6050 MONO_START_BB (cfg, end_bb);
6052 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6053 ins->type = STACK_I4;
6059 if (cmethod->name [0] != 'g')
6062 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6063 int dreg = alloc_ireg (cfg);
6064 int vtable_reg = alloc_preg (cfg);
6065 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6066 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6067 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6068 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6069 type_from_op (cfg, ins, NULL, NULL);
6072 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6073 int dreg = alloc_ireg (cfg);
6075 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6076 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6077 type_from_op (cfg, ins, NULL, NULL);
6082 } else if (cmethod->klass == runtime_helpers_class) {
6083 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6084 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6088 } else if (cmethod->klass == mono_defaults.monitor_class) {
6089 gboolean is_enter = FALSE;
6090 gboolean is_v4 = FALSE;
6092 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6096 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6101 * To make async stack traces work, icalls which can block should have a wrapper.
6102 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6104 MonoBasicBlock *end_bb;
6106 NEW_BBLOCK (cfg, end_bb);
6108 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6111 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6112 MONO_START_BB (cfg, end_bb);
6115 } else if (cmethod->klass == mono_defaults.thread_class) {
6116 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6117 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6118 MONO_ADD_INS (cfg->cbb, ins);
6120 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6121 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6122 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6124 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6126 if (fsig->params [0]->type == MONO_TYPE_I1)
6127 opcode = OP_LOADI1_MEMBASE;
6128 else if (fsig->params [0]->type == MONO_TYPE_U1)
6129 opcode = OP_LOADU1_MEMBASE;
6130 else if (fsig->params [0]->type == MONO_TYPE_I2)
6131 opcode = OP_LOADI2_MEMBASE;
6132 else if (fsig->params [0]->type == MONO_TYPE_U2)
6133 opcode = OP_LOADU2_MEMBASE;
6134 else if (fsig->params [0]->type == MONO_TYPE_I4)
6135 opcode = OP_LOADI4_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_U4)
6137 opcode = OP_LOADU4_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6139 opcode = OP_LOADI8_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_R4)
6141 opcode = OP_LOADR4_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_R8)
6143 opcode = OP_LOADR8_MEMBASE;
6144 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6145 opcode = OP_LOAD_MEMBASE;
6148 MONO_INST_NEW (cfg, ins, opcode);
6149 ins->inst_basereg = args [0]->dreg;
6150 ins->inst_offset = 0;
6151 MONO_ADD_INS (cfg->cbb, ins);
6153 switch (fsig->params [0]->type) {
6160 ins->dreg = mono_alloc_ireg (cfg);
6161 ins->type = STACK_I4;
6165 ins->dreg = mono_alloc_lreg (cfg);
6166 ins->type = STACK_I8;
6170 ins->dreg = mono_alloc_ireg (cfg);
6171 #if SIZEOF_REGISTER == 8
6172 ins->type = STACK_I8;
6174 ins->type = STACK_I4;
6179 ins->dreg = mono_alloc_freg (cfg);
6180 ins->type = STACK_R8;
6183 g_assert (mini_type_is_reference (fsig->params [0]));
6184 ins->dreg = mono_alloc_ireg_ref (cfg);
6185 ins->type = STACK_OBJ;
6189 if (opcode == OP_LOADI8_MEMBASE)
6190 ins = mono_decompose_opcode (cfg, ins);
6192 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6196 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6198 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6200 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6201 opcode = OP_STOREI1_MEMBASE_REG;
6202 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6203 opcode = OP_STOREI2_MEMBASE_REG;
6204 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6205 opcode = OP_STOREI4_MEMBASE_REG;
6206 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6207 opcode = OP_STOREI8_MEMBASE_REG;
6208 else if (fsig->params [0]->type == MONO_TYPE_R4)
6209 opcode = OP_STORER4_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_R8)
6211 opcode = OP_STORER8_MEMBASE_REG;
6212 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6213 opcode = OP_STORE_MEMBASE_REG;
6216 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6218 MONO_INST_NEW (cfg, ins, opcode);
6219 ins->sreg1 = args [1]->dreg;
6220 ins->inst_destbasereg = args [0]->dreg;
6221 ins->inst_offset = 0;
6222 MONO_ADD_INS (cfg->cbb, ins);
6224 if (opcode == OP_STOREI8_MEMBASE_REG)
6225 ins = mono_decompose_opcode (cfg, ins);
6230 } else if (cmethod->klass->image == mono_defaults.corlib &&
6231 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6232 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6235 #if SIZEOF_REGISTER == 8
6236 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6237 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6238 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6239 ins->dreg = mono_alloc_preg (cfg);
6240 ins->sreg1 = args [0]->dreg;
6241 ins->type = STACK_I8;
6242 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6243 MONO_ADD_INS (cfg->cbb, ins);
6247 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6249 /* 64 bit reads are already atomic */
6250 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6251 load_ins->dreg = mono_alloc_preg (cfg);
6252 load_ins->inst_basereg = args [0]->dreg;
6253 load_ins->inst_offset = 0;
6254 load_ins->type = STACK_I8;
6255 MONO_ADD_INS (cfg->cbb, load_ins);
6257 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6264 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6265 MonoInst *ins_iconst;
6268 if (fsig->params [0]->type == MONO_TYPE_I4) {
6269 opcode = OP_ATOMIC_ADD_I4;
6270 cfg->has_atomic_add_i4 = TRUE;
6272 #if SIZEOF_REGISTER == 8
6273 else if (fsig->params [0]->type == MONO_TYPE_I8)
6274 opcode = OP_ATOMIC_ADD_I8;
6277 if (!mono_arch_opcode_supported (opcode))
6279 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6280 ins_iconst->inst_c0 = 1;
6281 ins_iconst->dreg = mono_alloc_ireg (cfg);
6282 MONO_ADD_INS (cfg->cbb, ins_iconst);
6284 MONO_INST_NEW (cfg, ins, opcode);
6285 ins->dreg = mono_alloc_ireg (cfg);
6286 ins->inst_basereg = args [0]->dreg;
6287 ins->inst_offset = 0;
6288 ins->sreg2 = ins_iconst->dreg;
6289 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6290 MONO_ADD_INS (cfg->cbb, ins);
6292 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6293 MonoInst *ins_iconst;
6296 if (fsig->params [0]->type == MONO_TYPE_I4) {
6297 opcode = OP_ATOMIC_ADD_I4;
6298 cfg->has_atomic_add_i4 = TRUE;
6300 #if SIZEOF_REGISTER == 8
6301 else if (fsig->params [0]->type == MONO_TYPE_I8)
6302 opcode = OP_ATOMIC_ADD_I8;
6305 if (!mono_arch_opcode_supported (opcode))
6307 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6308 ins_iconst->inst_c0 = -1;
6309 ins_iconst->dreg = mono_alloc_ireg (cfg);
6310 MONO_ADD_INS (cfg->cbb, ins_iconst);
6312 MONO_INST_NEW (cfg, ins, opcode);
6313 ins->dreg = mono_alloc_ireg (cfg);
6314 ins->inst_basereg = args [0]->dreg;
6315 ins->inst_offset = 0;
6316 ins->sreg2 = ins_iconst->dreg;
6317 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6318 MONO_ADD_INS (cfg->cbb, ins);
6320 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6323 if (fsig->params [0]->type == MONO_TYPE_I4) {
6324 opcode = OP_ATOMIC_ADD_I4;
6325 cfg->has_atomic_add_i4 = TRUE;
6327 #if SIZEOF_REGISTER == 8
6328 else if (fsig->params [0]->type == MONO_TYPE_I8)
6329 opcode = OP_ATOMIC_ADD_I8;
6332 if (!mono_arch_opcode_supported (opcode))
6334 MONO_INST_NEW (cfg, ins, opcode);
6335 ins->dreg = mono_alloc_ireg (cfg);
6336 ins->inst_basereg = args [0]->dreg;
6337 ins->inst_offset = 0;
6338 ins->sreg2 = args [1]->dreg;
6339 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6340 MONO_ADD_INS (cfg->cbb, ins);
6343 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6344 MonoInst *f2i = NULL, *i2f;
6345 guint32 opcode, f2i_opcode, i2f_opcode;
6346 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6347 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6349 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6350 fsig->params [0]->type == MONO_TYPE_R4) {
6351 opcode = OP_ATOMIC_EXCHANGE_I4;
6352 f2i_opcode = OP_MOVE_F_TO_I4;
6353 i2f_opcode = OP_MOVE_I4_TO_F;
6354 cfg->has_atomic_exchange_i4 = TRUE;
6356 #if SIZEOF_REGISTER == 8
6358 fsig->params [0]->type == MONO_TYPE_I8 ||
6359 fsig->params [0]->type == MONO_TYPE_R8 ||
6360 fsig->params [0]->type == MONO_TYPE_I) {
6361 opcode = OP_ATOMIC_EXCHANGE_I8;
6362 f2i_opcode = OP_MOVE_F_TO_I8;
6363 i2f_opcode = OP_MOVE_I8_TO_F;
6366 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6367 opcode = OP_ATOMIC_EXCHANGE_I4;
6368 cfg->has_atomic_exchange_i4 = TRUE;
6374 if (!mono_arch_opcode_supported (opcode))
6378 /* TODO: Decompose these opcodes instead of bailing here. */
6379 if (COMPILE_SOFT_FLOAT (cfg))
6382 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6383 f2i->dreg = mono_alloc_ireg (cfg);
6384 f2i->sreg1 = args [1]->dreg;
6385 if (f2i_opcode == OP_MOVE_F_TO_I4)
6386 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6387 MONO_ADD_INS (cfg->cbb, f2i);
6390 MONO_INST_NEW (cfg, ins, opcode);
6391 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6392 ins->inst_basereg = args [0]->dreg;
6393 ins->inst_offset = 0;
6394 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6395 MONO_ADD_INS (cfg->cbb, ins);
6397 switch (fsig->params [0]->type) {
6399 ins->type = STACK_I4;
6402 ins->type = STACK_I8;
6405 #if SIZEOF_REGISTER == 8
6406 ins->type = STACK_I8;
6408 ins->type = STACK_I4;
6413 ins->type = STACK_R8;
6416 g_assert (mini_type_is_reference (fsig->params [0]));
6417 ins->type = STACK_OBJ;
6422 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6423 i2f->dreg = mono_alloc_freg (cfg);
6424 i2f->sreg1 = ins->dreg;
6425 i2f->type = STACK_R8;
6426 if (i2f_opcode == OP_MOVE_I4_TO_F)
6427 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6428 MONO_ADD_INS (cfg->cbb, i2f);
6433 if (cfg->gen_write_barriers && is_ref)
6434 emit_write_barrier (cfg, args [0], args [1]);
6436 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6437 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6438 guint32 opcode, f2i_opcode, i2f_opcode;
6439 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6440 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6442 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6443 fsig->params [1]->type == MONO_TYPE_R4) {
6444 opcode = OP_ATOMIC_CAS_I4;
6445 f2i_opcode = OP_MOVE_F_TO_I4;
6446 i2f_opcode = OP_MOVE_I4_TO_F;
6447 cfg->has_atomic_cas_i4 = TRUE;
6449 #if SIZEOF_REGISTER == 8
6451 fsig->params [1]->type == MONO_TYPE_I8 ||
6452 fsig->params [1]->type == MONO_TYPE_R8 ||
6453 fsig->params [1]->type == MONO_TYPE_I) {
6454 opcode = OP_ATOMIC_CAS_I8;
6455 f2i_opcode = OP_MOVE_F_TO_I8;
6456 i2f_opcode = OP_MOVE_I8_TO_F;
6459 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6460 opcode = OP_ATOMIC_CAS_I4;
6461 cfg->has_atomic_cas_i4 = TRUE;
6467 if (!mono_arch_opcode_supported (opcode))
6471 /* TODO: Decompose these opcodes instead of bailing here. */
6472 if (COMPILE_SOFT_FLOAT (cfg))
6475 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6476 f2i_new->dreg = mono_alloc_ireg (cfg);
6477 f2i_new->sreg1 = args [1]->dreg;
6478 if (f2i_opcode == OP_MOVE_F_TO_I4)
6479 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6480 MONO_ADD_INS (cfg->cbb, f2i_new);
6482 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6483 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6484 f2i_cmp->sreg1 = args [2]->dreg;
6485 if (f2i_opcode == OP_MOVE_F_TO_I4)
6486 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6487 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6490 MONO_INST_NEW (cfg, ins, opcode);
6491 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6492 ins->sreg1 = args [0]->dreg;
6493 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6494 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6495 MONO_ADD_INS (cfg->cbb, ins);
6497 switch (fsig->params [1]->type) {
6499 ins->type = STACK_I4;
6502 ins->type = STACK_I8;
6505 #if SIZEOF_REGISTER == 8
6506 ins->type = STACK_I8;
6508 ins->type = STACK_I4;
6512 ins->type = cfg->r4_stack_type;
6515 ins->type = STACK_R8;
6518 g_assert (mini_type_is_reference (fsig->params [1]));
6519 ins->type = STACK_OBJ;
6524 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6525 i2f->dreg = mono_alloc_freg (cfg);
6526 i2f->sreg1 = ins->dreg;
6527 i2f->type = STACK_R8;
6528 if (i2f_opcode == OP_MOVE_I4_TO_F)
6529 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6530 MONO_ADD_INS (cfg->cbb, i2f);
6535 if (cfg->gen_write_barriers && is_ref)
6536 emit_write_barrier (cfg, args [0], args [1]);
6538 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6539 fsig->params [1]->type == MONO_TYPE_I4) {
6540 MonoInst *cmp, *ceq;
6542 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6545 /* int32 r = CAS (location, value, comparand); */
6546 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6547 ins->dreg = alloc_ireg (cfg);
6548 ins->sreg1 = args [0]->dreg;
6549 ins->sreg2 = args [1]->dreg;
6550 ins->sreg3 = args [2]->dreg;
6551 ins->type = STACK_I4;
6552 MONO_ADD_INS (cfg->cbb, ins);
6554 /* bool result = r == comparand; */
6555 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6556 cmp->sreg1 = ins->dreg;
6557 cmp->sreg2 = args [2]->dreg;
6558 cmp->type = STACK_I4;
6559 MONO_ADD_INS (cfg->cbb, cmp);
6561 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6562 ceq->dreg = alloc_ireg (cfg);
6563 ceq->type = STACK_I4;
6564 MONO_ADD_INS (cfg->cbb, ceq);
6566 /* *success = result; */
6567 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6569 cfg->has_atomic_cas_i4 = TRUE;
6571 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6572 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6576 } else if (cmethod->klass->image == mono_defaults.corlib &&
6577 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6578 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6581 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6583 MonoType *t = fsig->params [0];
6585 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6587 g_assert (t->byref);
6588 /* t is a byref type, so the reference check is more complicated */
6589 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6590 if (t->type == MONO_TYPE_I1)
6591 opcode = OP_ATOMIC_LOAD_I1;
6592 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6593 opcode = OP_ATOMIC_LOAD_U1;
6594 else if (t->type == MONO_TYPE_I2)
6595 opcode = OP_ATOMIC_LOAD_I2;
6596 else if (t->type == MONO_TYPE_U2)
6597 opcode = OP_ATOMIC_LOAD_U2;
6598 else if (t->type == MONO_TYPE_I4)
6599 opcode = OP_ATOMIC_LOAD_I4;
6600 else if (t->type == MONO_TYPE_U4)
6601 opcode = OP_ATOMIC_LOAD_U4;
6602 else if (t->type == MONO_TYPE_R4)
6603 opcode = OP_ATOMIC_LOAD_R4;
6604 else if (t->type == MONO_TYPE_R8)
6605 opcode = OP_ATOMIC_LOAD_R8;
6606 #if SIZEOF_REGISTER == 8
6607 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6608 opcode = OP_ATOMIC_LOAD_I8;
6609 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6610 opcode = OP_ATOMIC_LOAD_U8;
6612 else if (t->type == MONO_TYPE_I)
6613 opcode = OP_ATOMIC_LOAD_I4;
6614 else if (is_ref || t->type == MONO_TYPE_U)
6615 opcode = OP_ATOMIC_LOAD_U4;
6619 if (!mono_arch_opcode_supported (opcode))
6622 MONO_INST_NEW (cfg, ins, opcode);
6623 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6624 ins->sreg1 = args [0]->dreg;
6625 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6626 MONO_ADD_INS (cfg->cbb, ins);
6629 case MONO_TYPE_BOOLEAN:
6636 ins->type = STACK_I4;
6640 ins->type = STACK_I8;
6644 #if SIZEOF_REGISTER == 8
6645 ins->type = STACK_I8;
6647 ins->type = STACK_I4;
6651 ins->type = cfg->r4_stack_type;
6654 ins->type = STACK_R8;
6658 ins->type = STACK_OBJ;
6664 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6666 MonoType *t = fsig->params [0];
6669 g_assert (t->byref);
6670 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6671 if (t->type == MONO_TYPE_I1)
6672 opcode = OP_ATOMIC_STORE_I1;
6673 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6674 opcode = OP_ATOMIC_STORE_U1;
6675 else if (t->type == MONO_TYPE_I2)
6676 opcode = OP_ATOMIC_STORE_I2;
6677 else if (t->type == MONO_TYPE_U2)
6678 opcode = OP_ATOMIC_STORE_U2;
6679 else if (t->type == MONO_TYPE_I4)
6680 opcode = OP_ATOMIC_STORE_I4;
6681 else if (t->type == MONO_TYPE_U4)
6682 opcode = OP_ATOMIC_STORE_U4;
6683 else if (t->type == MONO_TYPE_R4)
6684 opcode = OP_ATOMIC_STORE_R4;
6685 else if (t->type == MONO_TYPE_R8)
6686 opcode = OP_ATOMIC_STORE_R8;
6687 #if SIZEOF_REGISTER == 8
6688 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6689 opcode = OP_ATOMIC_STORE_I8;
6690 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6691 opcode = OP_ATOMIC_STORE_U8;
6693 else if (t->type == MONO_TYPE_I)
6694 opcode = OP_ATOMIC_STORE_I4;
6695 else if (is_ref || t->type == MONO_TYPE_U)
6696 opcode = OP_ATOMIC_STORE_U4;
6700 if (!mono_arch_opcode_supported (opcode))
6703 MONO_INST_NEW (cfg, ins, opcode);
6704 ins->dreg = args [0]->dreg;
6705 ins->sreg1 = args [1]->dreg;
6706 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6707 MONO_ADD_INS (cfg->cbb, ins);
6709 if (cfg->gen_write_barriers && is_ref)
6710 emit_write_barrier (cfg, args [0], args [1]);
6716 } else if (cmethod->klass->image == mono_defaults.corlib &&
6717 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6718 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6719 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6720 if (should_insert_brekpoint (cfg->method)) {
6721 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6723 MONO_INST_NEW (cfg, ins, OP_NOP);
6724 MONO_ADD_INS (cfg->cbb, ins);
6728 } else if (cmethod->klass->image == mono_defaults.corlib &&
6729 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6730 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6731 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6733 EMIT_NEW_ICONST (cfg, ins, 1);
6735 EMIT_NEW_ICONST (cfg, ins, 0);
6738 } else if (cmethod->klass->image == mono_defaults.corlib &&
6739 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6740 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6741 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6742 /* No stack walks are currently available, so implement this as an intrinsic */
6743 MonoInst *assembly_ins;
6745 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6746 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6749 } else if (cmethod->klass->image == mono_defaults.corlib &&
6750 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6751 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6752 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6753 /* No stack walks are currently available, so implement this as an intrinsic */
6754 MonoInst *method_ins;
6755 MonoMethod *declaring = cfg->method;
6757 /* This returns the declaring generic method */
6758 if (declaring->is_inflated)
6759 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6760 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6761 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6762 cfg->no_inline = TRUE;
6763 if (cfg->method != cfg->current_method)
6764 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6767 } else if (cmethod->klass == mono_defaults.math_class) {
6769 * There is general branchless code for Min/Max, but it does not work for
6771 * http://everything2.com/?node_id=1051618
6773 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6774 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6775 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6776 !strcmp (cmethod->klass->name, "Selector")) ||
6777 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6778 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6779 !strcmp (cmethod->klass->name, "Selector"))
6781 if (cfg->backend->have_objc_get_selector &&
6782 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6783 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6784 cfg->compile_aot && !cfg->llvm_only) {
6786 MonoJumpInfoToken *ji;
6791 cfg->exception_message = g_strdup ("GetHandle");
6792 cfg->disable_llvm = TRUE;
6794 if (args [0]->opcode == OP_GOT_ENTRY) {
6795 pi = (MonoInst *)args [0]->inst_p1;
6796 g_assert (pi->opcode == OP_PATCH_INFO);
6797 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6798 ji = (MonoJumpInfoToken *)pi->inst_p0;
6800 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6801 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6804 NULLIFY_INS (args [0]);
6807 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6808 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6809 ins->dreg = mono_alloc_ireg (cfg);
6811 ins->inst_p0 = mono_string_to_utf8 (s);
6812 MONO_ADD_INS (cfg->cbb, ins);
6817 #ifdef MONO_ARCH_SIMD_INTRINSICS
6818 if (cfg->opt & MONO_OPT_SIMD) {
6819 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6825 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6829 if (COMPILE_LLVM (cfg)) {
6830 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6835 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6839 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect certain well-known method calls to specialized JIT sequences.
 *   The case visible here replaces String.InternalAllocateStr with a direct
 *   call to the GC's managed allocator, but only when allocation profiling
 *   is off and shared (domain-neutral) code is not being generated.
 *   NOTE(review): original lines appear elided in this chunk (line-number
 *   gaps); the fallback/return-NULL path is not visible here — confirm
 *   against the full source.
 */
6842 inline static MonoInst*
6843 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6844 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6846 if (method->klass == mono_defaults.string_class) {
6847 /* managed string allocation support */
6848 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6849 MonoInst *iargs [2];
6850 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6851 MonoMethod *managed_alloc = NULL;
6853 g_assert (vtable); /* Should not fail since it is System.String */
6854 #ifndef MONO_CROSS_COMPILE
6855 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* iargs [0] = vtable of System.String, iargs [1] = the requested length argument. */
6859 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6860 iargs [1] = args [0];
6861 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   Copy the call arguments in SP into freshly created local variables
 *   (stored into cfg->args) and emit an ARGSTORE for each, as part of
 *   setting up an inlined callee's arguments.
 */
6868 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6870 MonoInst *store, *temp;
6873 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' argument the type comes from the stack entry itself. */
6874 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6877 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6878 * would be different than the MonoInst's used to represent arguments, and
6879 * the ldelema implementation can't deal with that.
6880 * Solution: When ldelema is used on an inline argument, create a var for
6881 * it, emit ldelema on that var, and emit the saving code below in
6882 * inline_method () if needed.
6884 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6885 cfg->args [i] = temp;
6886 /* This uses cfg->args [i] which is set by the preceding line */
6887 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Keep the CIL mapping of the original stack entry for debug info. */
6888 store->cil_code = sp [0]->cil_code;
6893 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6894 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6896 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging helper: restrict inlining to callees whose full name starts
 *   with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 *   environment variable.  The variable is read once and cached in a
 *   function-local static.
 */
6898 check_inline_called_method_name_limit (MonoMethod *called_method)
6901 static const char *limit = NULL;
6903 if (limit == NULL) {
6904 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6906 if (limit_string != NULL)
6907 limit = limit_string;
6912 if (limit [0] != '\0') {
6913 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
6915 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6916 g_free (called_method_name);
6918 //return (strncmp_result <= 0);
6919 return (strncmp_result == 0);
6926 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging helper: only allow inlining when the *caller's* full name
 *   starts with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 *   environment variable.  Mirrors check_inline_called_method_name_limit.
 */
6928 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6931 static const char *limit = NULL;
6933 if (limit == NULL) {
6934 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6935 if (limit_string != NULL) {
6936 limit = limit_string;
6942 if (limit [0] != '\0') {
6943 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the cached environment-variable value. */
6945 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6946 g_free (caller_method_name);
6948 //return (strncmp_result <= 0);
6949 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes vreg DREG to the default (zero) value of
 *   RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats,
 *   and VZERO for value types.
 */
6957 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage: R4CONST/R8CONST instructions reference the constant by address. */
6959 static double r8_0 = 0.0;
6960 static float r4_0 = 0.0;
6964 rtype = mini_get_underlying_type (rtype);
6968 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6969 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6970 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6971 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6972 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With cfg->r4fp, R4 values are kept in single precision instead of widening to R8. */
6973 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6974 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6975 ins->type = STACK_R4;
6976 ins->inst_p0 = (void*)&r4_0;
6978 MONO_ADD_INS (cfg->cbb, ins);
6979 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6980 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6981 ins->type = STACK_R8;
6982 ins->inst_p0 = (void*)&r8_0;
6984 MONO_ADD_INS (cfg->cbb, ins);
6985 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6986 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6987 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type variables known to be value types also get a VZERO. */
6988 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6989 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference and initialized to NULL. */
6991 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* initializations which keep
 *   the IR valid (every vreg has a def) without generating real code.
 *   Falls back to a real initialization for types with no dummy opcode.
 */
6996 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7000 rtype = mini_get_underlying_type (rtype);
7004 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7005 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7006 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7007 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7008 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
/* Same r4fp distinction as emit_init_rvar: keep R4 in single precision. */
7009 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7010 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7011 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7012 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7013 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7014 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7016 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7017 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real init instead. */
7019 emit_init_rvar (cfg, dreg, rtype);
7023 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE.  When INIT is FALSE the
 *   caller only needs the IR to stay valid, so dummy inits are used.
 */
7025 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7027 MonoInst *var = cfg->locals [local];
/* Under soft-float, init a temporary vreg first and store it into the local. */
7028 if (COMPILE_SOFT_FLOAT (cfg)) {
7030 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7031 emit_init_rvar (cfg, reg, type);
7032 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7035 emit_init_rvar (cfg, var->dreg, type);
7037 emit_dummy_init_rvar (cfg, var->dreg, type);
7044 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD at the current point.  Saves the relevant pieces
 *   of cfg state, runs mono_method_to_ir () on the callee between a fresh
 *   start/end bblock pair, restores state, and either links the generated
 *   blocks into the graph (success) or discards them (abort).
 *   Returns the cost of inlining CMETHOD (see caller for how the return
 *   value is interpreted).
 *   NOTE(review): this chunk has elided lines (line-number gaps), so some
 *   branches/returns are not visible here.
 */
7047 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7048 guchar *ip, guint real_offset, gboolean inline_always)
7050 MonoInst *ins, *rvar = NULL;
7051 MonoMethodHeader *cheader;
7052 MonoBasicBlock *ebblock, *sbblock;
7054 MonoMethod *prev_inlined_method;
7055 MonoInst **prev_locals, **prev_args;
7056 MonoType **prev_arg_types;
7057 guint prev_real_offset;
7058 GHashTable *prev_cbb_hash;
7059 MonoBasicBlock **prev_cil_offset_to_bb;
7060 MonoBasicBlock *prev_cbb;
7061 unsigned char* prev_cil_start;
7062 guint32 prev_cil_offset_to_bb_len;
7063 MonoMethod *prev_current_method;
7064 MonoGenericContext *prev_generic_context;
7065 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7067 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based inlining filters, driven by environment variables. */
7069 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7070 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7073 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7074 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7079 fsig = mono_method_signature (cmethod);
7081 if (cfg->verbose_level > 2)
7082 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each method as inlineable only once. */
7084 if (!cmethod->inline_info) {
7085 cfg->stat_inlineable_methods++;
7086 cmethod->inline_info = 1;
7089 /* allocate local variables */
7090 cheader = mono_method_get_header (cmethod);
7092 if (cheader == NULL || mono_loader_get_last_error ()) {
7094 mono_metadata_free_mh (cheader);
/* For must-inline methods a loader error is a hard compilation failure. */
7095 if (inline_always && mono_loader_get_last_error ()) {
7096 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7097 mono_error_set_from_loader_error (&cfg->error);
7100 mono_loader_clear_error ();
7104 /* Must verify before creating locals as it can cause the JIT to assert. */
7105 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7106 mono_metadata_free_mh (cheader);
7110 /* allocate space to store the return value */
7111 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7112 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array sized for the callee. */
7115 prev_locals = cfg->locals;
7116 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7117 for (i = 0; i < cheader->num_locals; ++i)
7118 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7120 /* allocate start and end blocks */
7121 /* This is needed so if the inline is aborted, we can clean up */
7122 NEW_BBLOCK (cfg, sbblock);
7123 sbblock->real_offset = real_offset;
7125 NEW_BBLOCK (cfg, ebblock);
7126 ebblock->block_num = cfg->num_bblocks++;
7127 ebblock->real_offset = real_offset;
/* Save all cfg state that mono_method_to_ir () will clobber. */
7129 prev_args = cfg->args;
7130 prev_arg_types = cfg->arg_types;
7131 prev_inlined_method = cfg->inlined_method;
7132 cfg->inlined_method = cmethod;
7133 cfg->ret_var_set = FALSE;
7134 cfg->inline_depth ++;
7135 prev_real_offset = cfg->real_offset;
7136 prev_cbb_hash = cfg->cbb_hash;
7137 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7138 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7139 prev_cil_start = cfg->cil_start;
7140 prev_cbb = cfg->cbb;
7141 prev_current_method = cfg->current_method;
7142 prev_generic_context = cfg->generic_context;
7143 prev_ret_var_set = cfg->ret_var_set;
7144 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method needs the virtual-call null check semantics. */
7146 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Convert the callee's IL into IR between sbblock and ebblock. */
7149 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7151 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state. */
7153 cfg->inlined_method = prev_inlined_method;
7154 cfg->real_offset = prev_real_offset;
7155 cfg->cbb_hash = prev_cbb_hash;
7156 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7157 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7158 cfg->cil_start = prev_cil_start;
7159 cfg->locals = prev_locals;
7160 cfg->args = prev_args;
7161 cfg->arg_types = prev_arg_types;
7162 cfg->current_method = prev_current_method;
7163 cfg->generic_context = prev_generic_context;
7164 cfg->ret_var_set = prev_ret_var_set;
7165 cfg->disable_inline = prev_disable_inline;
7166 cfg->inline_depth --;
/* Accept: cheap enough (< 60), forced, or marked AggressiveInlining. */
7168 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7169 if (cfg->verbose_level > 2)
7170 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7172 cfg->stat_inlined_methods++;
7174 /* always add some code to avoid block split failures */
7175 MONO_INST_NEW (cfg, ins, OP_NOP);
7176 MONO_ADD_INS (prev_cbb, ins);
7178 prev_cbb->next_bb = sbblock;
7179 link_bblock (cfg, prev_cbb, sbblock);
7182 * Get rid of the begin and end bblocks if possible to aid local
7185 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7187 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7188 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7190 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7191 MonoBasicBlock *prev = ebblock->in_bb [0];
7193 if (prev->next_bb == ebblock) {
7194 mono_merge_basic_blocks (cfg, prev, ebblock);
7196 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7197 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7198 cfg->cbb = prev_cbb;
7201 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7206 * It's possible that the rvar is set in some prev bblock, but not in others.
7212 for (i = 0; i < ebblock->in_count; ++i) {
7213 bb = ebblock->in_bb [i];
7215 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7218 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7228 * If the inlined method contains only a throw, then the ret var is not
7229 * set, so set it to a dummy value.
7232 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7234 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7237 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: undo any exception state and drop the new bblocks. */
7240 if (cfg->verbose_level > 2)
7241 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7242 cfg->exception_type = MONO_EXCEPTION_NONE;
7243 mono_loader_clear_error ();
7245 /* This gets rid of the newly added bblocks */
7246 cfg->cbb = prev_cbb;
7248 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7253 * Some of these comments may well be out-of-date.
7254 * Design decisions: we do a single pass over the IL code (and we do bblock
7255 * splitting/merging in the few cases when it's required: a back jump to an IL
7256 * address that was not already seen as bblock starting point).
7257 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7258 * Complex operations are decomposed in simpler ones right away. We need to let the
7259 * arch-specific code peek and poke inside this process somehow (except when the
7260 * optimizations can take advantage of the full semantic info of coarse opcodes).
7261 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7262 * MonoInst->opcode initially is the IL opcode or some simplification of that
7263 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7264 * opcode with value bigger than OP_LAST.
7265 * At this point the IR can be handed over to an interpreter, a dumb code generator
7266 * or to the optimizing code generator that will translate it to SSA form.
7268 * Profiling directed optimizations.
7269 * We may compile by default with few or no optimizations and instrument the code
7270 * or the user may indicate what methods to optimize the most either in a config file
7271 * or through repeated runs where the compiler applies offline the optimizations to
7272 * each method and then decides if it was worth it.
/*
 * IL-verification helper macros used while converting CIL to IR.  Each one
 * validates a precondition and, on failure, jumps to the UNVERIFIED /
 * TYPE_LOAD_ERROR handling paths defined by the enclosing function.
 */
7275 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7276 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7277 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7278 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7279 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7280 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7281 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7282 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7284 /* offset from br.s -> br like opcodes */
7285 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. no
 *   other block starts at IP according to cfg->cil_offset_to_bb.
 */
7288 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7290 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7292 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL between START and END: decode each opcode and
 *   create a basic block (GET_BBLOCK) at every branch target and at the
 *   instruction following each branch/switch, so later passes know every
 *   bblock boundary up front.  Also marks blocks ending in 'throw' as
 *   out-of-line (cold).
 *   NOTE(review): several case labels/fall-throughs are elided in this
 *   chunk (line-number gaps).
 */
7296 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7298 unsigned char *ip = start;
7299 unsigned char *target;
7302 MonoBasicBlock *bblock;
7303 const MonoOpcode *opcode;
7306 cli_addr = ip - start;
7307 i = mono_opcode_value ((const guint8 **)&ip, end);
7310 opcode = &mono_opcodes [i];
/* Advance over the operand according to the opcode's argument kind. */
7311 switch (opcode->argument) {
7312 case MonoInlineNone:
7315 case MonoInlineString:
7316 case MonoInlineType:
7317 case MonoInlineField:
7318 case MonoInlineMethod:
7321 case MonoShortInlineR:
7328 case MonoShortInlineVar:
7329 case MonoShortInlineI:
/* Short branch: 8-bit signed displacement from the next instruction. */
7332 case MonoShortInlineBrTarget:
7333 target = start + cli_addr + 2 + (signed char)ip [1];
7334 GET_BBLOCK (cfg, bblock, target);
7337 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 32-bit signed displacement from the next instruction. */
7339 case MonoInlineBrTarget:
7340 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7341 GET_BBLOCK (cfg, bblock, target);
7344 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 32-bit targets, all relative to the end of the instruction. */
7346 case MonoInlineSwitch: {
7347 guint32 n = read32 (ip + 1);
7350 cli_addr += 5 + 4 * n;
7351 target = start + cli_addr;
7352 GET_BBLOCK (cfg, bblock, target);
7354 for (j = 0; j < n; ++j) {
7355 target = start + cli_addr + (gint32)read32 (ip);
7356 GET_BBLOCK (cfg, bblock, target);
7366 g_assert_not_reached ();
7369 if (i == CEE_THROW) {
7370 unsigned char *bb_start = ip - 1;
7372 /* Find the start of the bblock containing the throw */
7374 while ((bb_start >= start) && !bblock) {
7375 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are moved out of line to keep hot code dense. */
7379 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 *   generic methods.  Wrapper methods carry their own token->data mapping,
 *   so look the method up in the wrapper data and inflate it with CONTEXT;
 *   otherwise resolve the token through the metadata loader.
 */
7389 static inline MonoMethod *
7390 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7394 mono_error_init (error);
7396 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7397 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7399 method = mono_class_inflate_generic_method_checked (method, context, error);
7402 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but additionally rejects methods with
 *   open constructed types when not compiling gshared code (such methods
 *   cannot be JITted concretely).  Errors are recorded in cfg->error when
 *   a CFG is available.
 */
7408 static inline MonoMethod *
7409 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7412 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7414 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7415 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7419 if (!method && !cfg)
7420 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's context, inflating with
 *   CONTEXT.  Wrappers resolve via their wrapper data; normal methods go
 *   through the typespec loader.  The class is initialized before return.
 */
7425 static inline MonoClass*
7426 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7431 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7432 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7434 klass = mono_class_inflate_generic_class (klass, context);
7436 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7437 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7440 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature (wrapper data for wrappers,
 *   metadata parse otherwise) and inflate it with CONTEXT.
 */
7444 static inline MonoMethodSignature*
7445 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7447 MonoMethodSignature *fsig;
7449 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7450 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7452 fsig = mono_metadata_parse_signature (method->klass->image, token);
7456 fsig = mono_inflate_generic_signature(fsig, context, &error);
7458 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *   Return (lazily resolving and caching) the SecurityManager.ThrowException
 *   method used to raise CoreCLR security exceptions from JITted code.
 */
7464 throw_exception (void)
7466 static MonoMethod *method = NULL;
7469 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7470 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   point, raising EX at runtime.
 */
7477 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7479 MonoMethod *thrower = throw_exception ();
7482 EMIT_NEW_PCONST (cfg, args [0], ex);
7483 mono_emit_method_call (cfg, thrower, args, NULL);
7487 * Return the original method if a wrapper is specified. We can only access
7488 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Map a wrapper method back to the method it wraps, since custom
 *   attributes (and thus CoreCLR security levels) only exist on the
 *   original method.
 */
7491 get_original_method (MonoMethod *method)
7493 if (method->wrapper_type == MONO_WRAPPER_NONE)
7496 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7497 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7500 /* in other cases we need to find the original method */
7501 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the corresponding security exception at runtime.
 */
7505 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7507 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7508 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7510 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the corresponding security exception at runtime.
 */
7514 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7516 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7517 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7519 emit_throw_exception (cfg, ex);
7523 * Check that the IL instructions at ip are the array initialization
7524 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 *   pattern at IP and, when the array element type and endianness allow,
 *   return a pointer to the static field's raw data so the JIT can fold
 *   the initialization.  *OUT_SIZE and *OUT_FIELD_TOKEN are filled in.
 *   For AOT, the RVA is returned instead so the lookup happens at load time.
 *   NOTE(review): the switch's size assignments and several returns are
 *   elided in this chunk (line-number gaps).
 */
7527 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7530 * newarr[System.Int32]
7532 * ldtoken field valuetype ...
7533 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Byte-level match of the dup; ldtoken <field>; call <method> sequence. */
7535 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7537 guint32 token = read32 (ip + 7);
7538 guint32 field_token = read32 (ip + 2);
7539 guint32 field_index = field_token & 0xffffff;
7541 const char *data_ptr;
7543 MonoMethod *cmethod;
7544 MonoClass *dummy_class;
7545 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7549 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7553 *out_field_token = field_token;
7555 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only fold calls to corlib's RuntimeHelpers.InitializeArray. */
7558 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7560 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7561 case MONO_TYPE_BOOLEAN:
7565 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7566 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7567 case MONO_TYPE_CHAR:
/* Reject if the requested size exceeds the field's actual data size. */
7584 if (size > mono_type_size (field->type, &dummy_align))
7587 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7588 if (!image_is_dynamic (method->klass->image)) {
7589 field_index = read32 (ip + 2) & 0xffffff;
7590 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7591 data_ptr = mono_image_rva_map (method->klass->image, rva);
7592 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7593 /* for aot code we do the lookup on load */
7594 if (aot && data_ptr)
7595 return (const char *)GUINT_TO_POINTER (rva);
7597 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7599 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 *   IP in METHOD, including the disassembled offending instruction.  The
 *   header is queued on headers_to_free for later release.
 */
7607 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7609 char *method_fname = mono_method_full_name (method, TRUE);
7611 MonoMethodHeader *header = mono_method_get_header (method);
7613 if (header->code_size == 0)
7614 method_code = g_strdup ("method body is empty.");
7616 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7617 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7618 g_free (method_fname);
7619 g_free (method_code);
7620 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *   Emit a store into local N from the stack entry *SP.  When the store
 *   would be a plain reg-reg move of a constant that was just emitted,
 *   retarget the constant's dreg to the local instead of emitting a move.
 */
7624 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7627 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7628 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7629 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7630 /* Optimize reg-reg moves away */
7632 * Can't optimize other opcodes, since sp[0] might point to
7633 * the last ins of a decomposed opcode.
7635 sp [0]->dreg = (cfg)->locals [n]->dreg;
7637 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7642 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for ldloca: when the address is immediately consumed by an
 *   initobj in the same bblock, emit the local initialization directly and
 *   return the IP past the consumed instructions; otherwise the ldloca is
 *   kept (it inhibits many later optimizations).
 *   NOTE(review): the non-matching return path is elided in this chunk.
 */
7645 static inline unsigned char *
7646 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7656 local = read16 (ip + 2);
/* Match a following 'initobj <token>' prefix in the same basic block. */
7660 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7661 /* From the INITOBJ case */
7662 token = read32 (ip + 2);
7663 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7664 CHECK_TYPELOAD (klass);
7665 type = mini_get_underlying_type (&klass->byval_arg);
7666 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode,
 * where vtable and IMT slots hold function descriptors (addr + arg pairs)
 * instead of raw code addresses/trampolines. Picks the cheapest calling
 * sequence based on whether the call is a plain virtual call, a simple
 * interface call, a generic-virtual/variant-interface call, or a
 * gsharedvt call that needs the fully general resolver icalls.
 */
7674 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7676 MonoInst *icall_args [16];
7677 MonoInst *call_target, *ins, *vtable_ins;
7678 int arg_reg, this_reg, vtable_reg;
7679 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7680 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7681 gboolean variant_iface = FALSE;
7686 * In llvm-only mode, vtables contain function descriptors instead of
7687 * method addresses/trampolines.
/* sp [0] is the receiver; fault early on a null 'this' */
7689 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7692 slot = mono_method_get_imt_slot (cmethod);
7694 slot = mono_method_get_vtable_index (cmethod);
7696 this_reg = sp [0]->dreg;
/* Variant interfaces need the generic-virtual path below */
7698 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7699 variant_iface = TRUE;
7701 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7703 * The simplest case, a normal virtual call.
7705 int slot_reg = alloc_preg (cfg);
7706 int addr_reg = alloc_preg (cfg);
7707 int arg_reg = alloc_preg (cfg);
7708 MonoBasicBlock *non_null_bb;
7710 vtable_reg = alloc_preg (cfg);
7711 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7712 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7714 /* Load the vtable slot, which contains a function descriptor. */
7715 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* Lazily-initialized slot: NULL means it must be filled in via an icall */
7717 NEW_BBLOCK (cfg, non_null_bb);
7719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7720 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7721 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7724 // FIXME: Make the wrapper use the preserveall cconv
7725 // FIXME: Use one icall per slot for small slot numbers ?
7726 icall_args [0] = vtable_ins;
7727 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7728 /* Make the icall return the vtable slot value to save some code space */
7729 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7730 ins->dreg = slot_reg;
7731 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7734 MONO_START_BB (cfg, non_null_bb);
7735 /* Load the address + arg from the vtable slot */
7736 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7737 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7739 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7742 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7744 * A simple interface call
7746 * We make a call through an imt slot to obtain the function descriptor we need to call.
7747 * The imt slot contains a function descriptor for a runtime function + arg.
7749 int slot_reg = alloc_preg (cfg);
7750 int addr_reg = alloc_preg (cfg);
7751 int arg_reg = alloc_preg (cfg);
7752 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7754 vtable_reg = alloc_preg (cfg);
7755 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable */
7756 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7759 * The slot is already initialized when the vtable is created so there is no need
7763 /* Load the imt slot, which contains a function descriptor. */
7764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7766 /* Load the address + arg of the imt thunk from the imt slot */
7767 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7768 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7770 * IMT thunks in llvm-only mode are C functions which take an info argument
7771 * plus the imt method and return the ftndesc to call.
7773 icall_args [0] = thunk_arg_ins;
7774 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7775 cmethod, MONO_RGCTX_INFO_METHOD);
7776 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7778 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7781 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7783 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7784 * dynamically extended as more instantiations are discovered.
7785 * This handles generic virtual methods both on classes and interfaces.
7787 int slot_reg = alloc_preg (cfg);
7788 int addr_reg = alloc_preg (cfg);
7789 int arg_reg = alloc_preg (cfg);
7790 int ftndesc_reg = alloc_preg (cfg);
7791 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7792 MonoBasicBlock *slowpath_bb, *end_bb;
7794 NEW_BBLOCK (cfg, slowpath_bb);
7795 NEW_BBLOCK (cfg, end_bb);
7797 vtable_reg = alloc_preg (cfg);
7798 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface methods use an IMT slot, class methods a vtable slot */
7800 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7802 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7804 /* Load the slot, which contains a function descriptor. */
7805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7807 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7808 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7809 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7813 /* Same as with iface calls */
7814 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7815 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7816 icall_args [0] = thunk_arg_ins;
7817 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7818 cmethod, MONO_RGCTX_INFO_METHOD);
7819 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7820 ftndesc_ins->dreg = ftndesc_reg;
7822 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7823 * they don't know about yet. Fall back to the slowpath in that case.
7825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7826 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7828 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall */
7831 MONO_START_BB (cfg, slowpath_bb);
7832 icall_args [0] = vtable_ins;
7833 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7834 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7835 cmethod, MONO_RGCTX_INFO_METHOD);
7837 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7839 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7840 ftndesc_ins->dreg = ftndesc_reg;
7841 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7844 MONO_START_BB (cfg, end_bb);
7845 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7849 * Non-optimized cases
7851 icall_args [0] = sp [0];
7852 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7854 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7855 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver icall stores the extra call argument here */
7857 arg_reg = alloc_preg (cfg);
7858 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7859 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7861 g_assert (is_gsharedvt);
7863 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7865 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7868 * Pass the extra argument even if the callee doesn't receive it, most
7869 * calling conventions allow this.
7871 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking up the parent chain.
 */
7875 is_exception_class (MonoClass *klass)
7878 if (klass == mono_defaults.exception_class)
7880 klass = klass->parent;
7886 * is_jit_optimizer_disabled:
7888 * Determine whether M's assembly has a DebuggableAttribute with the
7889 * IsJITOptimizerDisabled flag set.
7892 is_jit_optimizer_disabled (MonoMethod *m)
7894 MonoAssembly *ass = m->klass->image->assembly;
7895 MonoCustomAttrInfo* attrs;
/* Cached lookup of System.Diagnostics.DebuggableAttribute */
7896 static MonoClass *klass;
7898 gboolean val = FALSE;
/* Fast path: the result is cached per-assembly */
7901 if (ass->jit_optimizer_disabled_inited)
7902 return ass->jit_optimizer_disabled;
7905 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the cached value before setting the inited flag, so readers
 * that see inited == TRUE always see a valid value */
7908 ass->jit_optimizer_disabled = FALSE;
7909 mono_memory_barrier ();
7910 ass->jit_optimizer_disabled_inited = TRUE;
7914 attrs = mono_custom_attrs_from_assembly (ass);
7916 for (i = 0; i < attrs->num_attrs; ++i) {
7917 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7919 MonoMethodSignature *sig;
7921 if (!attr->ctor || attr->ctor->klass != klass)
7923 /* Decode the attribute. See reflection.c */
7924 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7925 g_assert (read16 (p) == 0x0001);
7928 // FIXME: Support named parameters
7929 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded */
7930 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7932 /* Two boolean arguments */
7936 mono_custom_attrs_free (attrs);
/* Same publish order as above: value, barrier, then the inited flag */
7939 ass->jit_optimizer_disabled = val;
7940 mono_memory_barrier ();
7941 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG
 * (reached through CALL_OPCODE) can be honored. Starts from the
 * architecture's answer and then vetoes cases where the callee could
 * observe the caller's (about to be destroyed) stack frame, or where the
 * runtime needs the caller frame to stay live.
 */
7947 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7949 gboolean supported_tail_call;
7952 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7954 for (i = 0; i < fsig->param_count; ++i) {
7955 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7956 /* These can point to the current method's stack */
7957 supported_tail_call = FALSE;
7959 if (fsig->hasthis && cmethod->klass->valuetype)
7960 /* this might point to the current method's stack */
7961 supported_tail_call = FALSE;
7962 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7963 supported_tail_call = FALSE;
/* An LMF frame pins the caller frame, which a tail call would destroy */
7964 if (cfg->method->save_lmf)
7965 supported_tail_call = FALSE;
7966 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7967 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are handled */
7968 if (call_opcode != CEE_CALL)
7969 supported_tail_call = FALSE;
7971 /* Debugging support */
7973 if (supported_tail_call) {
7974 if (!mono_debug_count ())
7975 supported_tail_call = FALSE;
7979 return supported_tail_call;
7985 * Handle calls made to ctors from NEWOBJ opcodes.
/* Emits the ctor call itself; the object allocation is done by the caller.
 * Picks between inlining, gsharedvt indirect calls, rgctx indirect calls
 * and a plain direct call, and may set an exception on CFG. */
7988 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7989 MonoInst **sp, guint8 *ip, int *inline_costs)
7991 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument */
7993 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7994 mono_method_is_generic_sharable (cmethod, TRUE)) {
7995 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7996 mono_class_vtable (cfg->domain, cmethod->klass);
7997 CHECK_TYPELOAD (cmethod->klass);
7999 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8000 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8003 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8004 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8006 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8008 CHECK_TYPELOAD (cmethod->klass);
8009 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8014 /* Avoid virtual calls to ctors if possible */
8015 if (mono_class_is_marshalbyref (cmethod->klass))
8016 callvirt_this_arg = sp [0];
/* First try an intrinsic implementation of the ctor */
8018 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8019 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8020 CHECK_CFG_EXCEPTION;
/* Then try inlining; Exception subclass ctors are never inlined */
8021 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8022 mono_method_check_inlining (cfg, cmethod) &&
8023 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8026 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8027 cfg->real_offset += 5;
8029 *inline_costs += costs - 5;
8031 INLINE_FAILURE ("inline failure");
8032 // FIXME-VT: Clean this up
8033 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8034 GSHAREDVT_FAILURE(*ip);
8035 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signatures go through an out trampoline */
8037 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8040 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8042 if (cfg->llvm_only) {
8043 // FIXME: Avoid initializing vtable_arg
8044 emit_llvmonly_calli (cfg, fsig, sp, addr);
8046 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8048 } else if (context_used &&
8049 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8050 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8051 MonoInst *cmethod_addr;
8053 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8055 if (cfg->llvm_only) {
8056 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8057 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8058 emit_llvmonly_calli (cfg, fsig, sp, addr);
8060 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8061 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8063 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain direct (or callvirt for MBR) ctor call */
8066 INLINE_FAILURE ("ctor call");
8067 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8068 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to return VAL from the current method. Valuetypes which must
 * be returned by reference (STOBJ) are stored either into the return
 * variable or through the hidden vret address argument; scalar values go
 * through the arch-specific setret, with a soft-float R4 conversion when
 * needed.
 */
8075 emit_setret (MonoCompile *cfg, MonoInst *val)
8077 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
8080 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No vret_addr: store into the return variable directly */
8083 if (!cfg->vret_addr) {
8084 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Otherwise store the valuetype through the hidden return address arg */
8086 EMIT_NEW_RETLOADA (cfg, ret_addr);
8088 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8089 ins->klass = mono_class_from_mono_type (ret_type);
8092 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: an R4 return value must be converted via an icall first */
8093 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8094 MonoInst *iargs [1];
8098 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8099 mono_arch_emit_setret (cfg, cfg->method, conv);
8101 mono_arch_emit_setret (cfg, cfg->method, val);
8104 mono_arch_emit_setret (cfg, cfg->method, val);
8110 * mono_method_to_ir:
8112 * Translate the .net IL into linear IR.
8115 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8116 MonoInst *return_var, MonoInst **inline_args,
8117 guint inline_offset, gboolean is_virtual_call)
8120 MonoInst *ins, **sp, **stack_start;
8121 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8122 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8123 MonoMethod *cmethod, *method_definition;
8124 MonoInst **arg_array;
8125 MonoMethodHeader *header;
8127 guint32 token, ins_flag;
8129 MonoClass *constrained_class = NULL;
8130 unsigned char *ip, *end, *target, *err_pos;
8131 MonoMethodSignature *sig;
8132 MonoGenericContext *generic_context = NULL;
8133 MonoGenericContainer *generic_container = NULL;
8134 MonoType **param_types;
8135 int i, n, start_new_bblock, dreg;
8136 int num_calls = 0, inline_costs = 0;
8137 int breakpoint_id = 0;
8139 GSList *class_inits = NULL;
8140 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8142 gboolean init_locals, seq_points, skip_dead_blocks;
8143 gboolean sym_seq_points = FALSE;
8144 MonoDebugMethodInfo *minfo;
8145 MonoBitSet *seq_point_locs = NULL;
8146 MonoBitSet *seq_point_set_locs = NULL;
8148 cfg->disable_inline = is_jit_optimizer_disabled (method);
8150 /* serialization and xdomain stuff may need access to private fields and methods */
8151 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8152 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8153 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8154 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8155 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8156 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8158 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8159 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8160 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8161 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8162 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8164 image = method->klass->image;
8165 header = mono_method_get_header (method);
8167 if (mono_loader_get_last_error ()) {
8168 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8169 mono_error_set_from_loader_error (&cfg->error);
8171 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8173 goto exception_exit;
8175 generic_container = mono_method_get_generic_container (method);
8176 sig = mono_method_signature (method);
8177 num_args = sig->hasthis + sig->param_count;
8178 ip = (unsigned char*)header->code;
8179 cfg->cil_start = ip;
8180 end = ip + header->code_size;
8181 cfg->stat_cil_code_size += header->code_size;
8183 seq_points = cfg->gen_seq_points && cfg->method == method;
8185 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8186 /* We could hit a seq point before attaching to the JIT (#8338) */
8190 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8191 minfo = mono_debug_lookup_method (method);
8193 MonoSymSeqPoint *sps;
8194 int i, n_il_offsets;
8196 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8197 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8198 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8199 sym_seq_points = TRUE;
8200 for (i = 0; i < n_il_offsets; ++i) {
8201 if (sps [i].il_offset < header->code_size)
8202 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8205 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8206 /* Methods without line number info like auto-generated property accessors */
8207 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8208 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8209 sym_seq_points = TRUE;
8214 * Methods without init_locals set could cause asserts in various passes
8215 * (#497220). To work around this, we emit dummy initialization opcodes
8216 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8217 * on some platforms.
8219 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8220 init_locals = header->init_locals;
8224 method_definition = method;
8225 while (method_definition->is_inflated) {
8226 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8227 method_definition = imethod->declaring;
8230 /* SkipVerification is not allowed if core-clr is enabled */
8231 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8233 dont_verify_stloc = TRUE;
8236 if (sig->is_inflated)
8237 generic_context = mono_method_get_context (method);
8238 else if (generic_container)
8239 generic_context = &generic_container->context;
8240 cfg->generic_context = generic_context;
8243 g_assert (!sig->has_type_parameters);
8245 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8246 g_assert (method->is_inflated);
8247 g_assert (mono_method_get_context (method)->method_inst);
8249 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8250 g_assert (sig->generic_param_count);
8252 if (cfg->method == method) {
8253 cfg->real_offset = 0;
8255 cfg->real_offset = inline_offset;
8258 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8259 cfg->cil_offset_to_bb_len = header->code_size;
8261 cfg->current_method = method;
8263 if (cfg->verbose_level > 2)
8264 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8266 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8268 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8269 for (n = 0; n < sig->param_count; ++n)
8270 param_types [n + sig->hasthis] = sig->params [n];
8271 cfg->arg_types = param_types;
8273 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8274 if (cfg->method == method) {
8276 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8277 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8280 NEW_BBLOCK (cfg, start_bblock);
8281 cfg->bb_entry = start_bblock;
8282 start_bblock->cil_code = NULL;
8283 start_bblock->cil_length = 0;
8286 NEW_BBLOCK (cfg, end_bblock);
8287 cfg->bb_exit = end_bblock;
8288 end_bblock->cil_code = NULL;
8289 end_bblock->cil_length = 0;
8290 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8291 g_assert (cfg->num_bblocks == 2);
8293 arg_array = cfg->args;
8295 if (header->num_clauses) {
8296 cfg->spvars = g_hash_table_new (NULL, NULL);
8297 cfg->exvars = g_hash_table_new (NULL, NULL);
8299 /* handle exception clauses */
8300 for (i = 0; i < header->num_clauses; ++i) {
8301 MonoBasicBlock *try_bb;
8302 MonoExceptionClause *clause = &header->clauses [i];
8303 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8305 try_bb->real_offset = clause->try_offset;
8306 try_bb->try_start = TRUE;
8307 try_bb->region = ((i + 1) << 8) | clause->flags;
8308 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8309 tblock->real_offset = clause->handler_offset;
8310 tblock->flags |= BB_EXCEPTION_HANDLER;
8313 * Linking the try block with the EH block hinders inlining as we won't be able to
8314 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8316 if (COMPILE_LLVM (cfg))
8317 link_bblock (cfg, try_bb, tblock);
8319 if (*(ip + clause->handler_offset) == CEE_POP)
8320 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8322 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8323 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8324 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8325 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8326 MONO_ADD_INS (tblock, ins);
8328 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8329 /* finally clauses already have a seq point */
8330 /* seq points for filter clauses are emitted below */
8331 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8332 MONO_ADD_INS (tblock, ins);
8335 /* todo: is a fault block unsafe to optimize? */
8336 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8337 tblock->flags |= BB_EXCEPTION_UNSAFE;
8340 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8342 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8344 /* catch and filter blocks get the exception object on the stack */
8345 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8346 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8348 /* mostly like handle_stack_args (), but just sets the input args */
8349 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8350 tblock->in_scount = 1;
8351 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8352 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8356 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8357 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8358 if (!cfg->compile_llvm) {
8359 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8360 ins->dreg = tblock->in_stack [0]->dreg;
8361 MONO_ADD_INS (tblock, ins);
8364 MonoInst *dummy_use;
8367 * Add a dummy use for the exvar so its liveness info will be
8370 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8373 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8374 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8375 MONO_ADD_INS (tblock, ins);
8378 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8379 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8380 tblock->flags |= BB_EXCEPTION_HANDLER;
8381 tblock->real_offset = clause->data.filter_offset;
8382 tblock->in_scount = 1;
8383 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8384 /* The filter block shares the exvar with the handler block */
8385 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8386 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8387 MONO_ADD_INS (tblock, ins);
8391 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8392 clause->data.catch_class &&
8394 mono_class_check_context_used (clause->data.catch_class)) {
8396 * In shared generic code with catch
8397 * clauses containing type variables
8398 * the exception handling code has to
8399 * be able to get to the rgctx.
8400 * Therefore we have to make sure that
8401 * the vtable/mrgctx argument (for
8402 * static or generic methods) or the
8403 * "this" argument (for non-static
8404 * methods) are live.
8406 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8407 mini_method_get_context (method)->method_inst ||
8408 method->klass->valuetype) {
8409 mono_get_vtable_var (cfg);
8411 MonoInst *dummy_use;
8413 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8418 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8419 cfg->cbb = start_bblock;
8420 cfg->args = arg_array;
8421 mono_save_args (cfg, sig, inline_args);
8424 /* FIRST CODE BLOCK */
8425 NEW_BBLOCK (cfg, tblock);
8426 tblock->cil_code = ip;
8430 ADD_BBLOCK (cfg, tblock);
8432 if (cfg->method == method) {
8433 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8434 if (breakpoint_id) {
8435 MONO_INST_NEW (cfg, ins, OP_BREAK);
8436 MONO_ADD_INS (cfg->cbb, ins);
8440 /* we use a separate basic block for the initialization code */
8441 NEW_BBLOCK (cfg, init_localsbb);
8442 cfg->bb_init = init_localsbb;
8443 init_localsbb->real_offset = cfg->real_offset;
8444 start_bblock->next_bb = init_localsbb;
8445 init_localsbb->next_bb = cfg->cbb;
8446 link_bblock (cfg, start_bblock, init_localsbb);
8447 link_bblock (cfg, init_localsbb, cfg->cbb);
8449 cfg->cbb = init_localsbb;
8451 if (cfg->gsharedvt && cfg->method == method) {
8452 MonoGSharedVtMethodInfo *info;
8453 MonoInst *var, *locals_var;
8456 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8457 info->method = cfg->method;
8458 info->count_entries = 16;
8459 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8460 cfg->gsharedvt_info = info;
8462 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8463 /* prevent it from being register allocated */
8464 //var->flags |= MONO_INST_VOLATILE;
8465 cfg->gsharedvt_info_var = var;
8467 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8468 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8470 /* Allocate locals */
8471 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8472 /* prevent it from being register allocated */
8473 //locals_var->flags |= MONO_INST_VOLATILE;
8474 cfg->gsharedvt_locals_var = locals_var;
8476 dreg = alloc_ireg (cfg);
8477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8479 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8480 ins->dreg = locals_var->dreg;
8482 MONO_ADD_INS (cfg->cbb, ins);
8483 cfg->gsharedvt_locals_var_ins = ins;
8485 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8488 ins->flags |= MONO_INST_INIT;
8492 if (mono_security_core_clr_enabled ()) {
8493 /* check if this is native code, e.g. an icall or a p/invoke */
8494 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8495 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8497 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8498 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8500 /* if this ia a native call then it can only be JITted from platform code */
8501 if ((icall || pinvk) && method->klass && method->klass->image) {
8502 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8503 MonoException *ex = icall ? mono_get_exception_security () :
8504 mono_get_exception_method_access ();
8505 emit_throw_exception (cfg, ex);
8512 CHECK_CFG_EXCEPTION;
8514 if (header->code_size == 0)
8517 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8522 if (cfg->method == method)
8523 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8525 for (n = 0; n < header->num_locals; ++n) {
8526 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8531 /* We force the vtable variable here for all shared methods
8532 for the possibility that they might show up in a stack
8533 trace where their exact instantiation is needed. */
8534 if (cfg->gshared && method == cfg->method) {
8535 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8536 mini_method_get_context (method)->method_inst ||
8537 method->klass->valuetype) {
8538 mono_get_vtable_var (cfg);
8540 /* FIXME: Is there a better way to do this?
8541 We need the variable live for the duration
8542 of the whole method. */
8543 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8547 /* add a check for this != NULL to inlined methods */
8548 if (is_virtual_call) {
8551 NEW_ARGLOAD (cfg, arg_ins, 0);
8552 MONO_ADD_INS (cfg->cbb, arg_ins);
8553 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8556 skip_dead_blocks = !dont_verify;
8557 if (skip_dead_blocks) {
8558 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8563 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8564 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8567 start_new_bblock = 0;
8569 if (cfg->method == method)
8570 cfg->real_offset = ip - header->code;
8572 cfg->real_offset = inline_offset;
8577 if (start_new_bblock) {
8578 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8579 if (start_new_bblock == 2) {
8580 g_assert (ip == tblock->cil_code);
8582 GET_BBLOCK (cfg, tblock, ip);
8584 cfg->cbb->next_bb = tblock;
8586 start_new_bblock = 0;
8587 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8588 if (cfg->verbose_level > 3)
8589 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8590 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8594 g_slist_free (class_inits);
8597 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8598 link_bblock (cfg, cfg->cbb, tblock);
8599 if (sp != stack_start) {
8600 handle_stack_args (cfg, stack_start, sp - stack_start);
8602 CHECK_UNVERIFIABLE (cfg);
8604 cfg->cbb->next_bb = tblock;
8606 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8607 if (cfg->verbose_level > 3)
8608 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8609 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8612 g_slist_free (class_inits);
8617 if (skip_dead_blocks) {
8618 int ip_offset = ip - header->code;
8620 if (ip_offset == bb->end)
8624 int op_size = mono_opcode_size (ip, end);
8625 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8627 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8629 if (ip_offset + op_size == bb->end) {
8630 MONO_INST_NEW (cfg, ins, OP_NOP);
8631 MONO_ADD_INS (cfg->cbb, ins);
8632 start_new_bblock = 1;
8640 * Sequence points are points where the debugger can place a breakpoint.
8641 * Currently, we generate these automatically at points where the IL
8644 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8646 * Make methods interruptable at the beginning, and at the targets of
8647 * backward branches.
8648 * Also, do this at the start of every bblock in methods with clauses too,
8649 * to be able to handle instructions with inprecise control flow like
8651 * Backward branches are handled at the end of method-to-ir ().
8653 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8654 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8656 /* Avoid sequence points on empty IL like .volatile */
8657 // FIXME: Enable this
8658 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8659 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8660 if ((sp != stack_start) && !sym_seq_point)
8661 ins->flags |= MONO_INST_NONEMPTY_STACK;
8662 MONO_ADD_INS (cfg->cbb, ins);
8665 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8668 cfg->cbb->real_offset = cfg->real_offset;
8670 if ((cfg->method == method) && cfg->coverage_info) {
8671 guint32 cil_offset = ip - header->code;
8672 cfg->coverage_info->data [cil_offset].cil_code = ip;
8674 /* TODO: Use an increment here */
8675 #if defined(TARGET_X86)
8676 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8677 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8679 MONO_ADD_INS (cfg->cbb, ins);
8681 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8682 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8686 if (cfg->verbose_level > 3)
8687 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8691 if (seq_points && !sym_seq_points && sp != stack_start) {
8693 * The C# compiler uses these nops to notify the JIT that it should
8694 * insert seq points.
8696 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8697 MONO_ADD_INS (cfg->cbb, ins);
8699 if (cfg->keep_cil_nops)
8700 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8702 MONO_INST_NEW (cfg, ins, OP_NOP);
8704 MONO_ADD_INS (cfg->cbb, ins);
8707 if (should_insert_brekpoint (cfg->method)) {
8708 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8710 MONO_INST_NEW (cfg, ins, OP_NOP);
8713 MONO_ADD_INS (cfg->cbb, ins);
8719 CHECK_STACK_OVF (1);
8720 n = (*ip)-CEE_LDARG_0;
8722 EMIT_NEW_ARGLOAD (cfg, ins, n);
8730 CHECK_STACK_OVF (1);
8731 n = (*ip)-CEE_LDLOC_0;
8733 EMIT_NEW_LOCLOAD (cfg, ins, n);
8742 n = (*ip)-CEE_STLOC_0;
8745 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8747 emit_stloc_ir (cfg, sp, header, n);
8754 CHECK_STACK_OVF (1);
8757 EMIT_NEW_ARGLOAD (cfg, ins, n);
8763 CHECK_STACK_OVF (1);
8766 NEW_ARGLOADA (cfg, ins, n);
8767 MONO_ADD_INS (cfg->cbb, ins);
8777 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8779 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8784 CHECK_STACK_OVF (1);
8787 EMIT_NEW_LOCLOAD (cfg, ins, n);
8791 case CEE_LDLOCA_S: {
8792 unsigned char *tmp_ip;
8794 CHECK_STACK_OVF (1);
8795 CHECK_LOCAL (ip [1]);
8797 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8803 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8812 CHECK_LOCAL (ip [1]);
8813 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8815 emit_stloc_ir (cfg, sp, header, ip [1]);
8820 CHECK_STACK_OVF (1);
8821 EMIT_NEW_PCONST (cfg, ins, NULL);
8822 ins->type = STACK_OBJ;
8827 CHECK_STACK_OVF (1);
8828 EMIT_NEW_ICONST (cfg, ins, -1);
8841 CHECK_STACK_OVF (1);
8842 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8848 CHECK_STACK_OVF (1);
8850 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8856 CHECK_STACK_OVF (1);
8857 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8863 CHECK_STACK_OVF (1);
8864 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8865 ins->type = STACK_I8;
8866 ins->dreg = alloc_dreg (cfg, STACK_I8);
8868 ins->inst_l = (gint64)read64 (ip);
8869 MONO_ADD_INS (cfg->cbb, ins);
8875 gboolean use_aotconst = FALSE;
8877 #ifdef TARGET_POWERPC
8878 /* FIXME: Clean this up */
8879 if (cfg->compile_aot)
8880 use_aotconst = TRUE;
8883 /* FIXME: we should really allocate this only late in the compilation process */
8884 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8886 CHECK_STACK_OVF (1);
8892 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8894 dreg = alloc_freg (cfg);
8895 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8896 ins->type = cfg->r4_stack_type;
8898 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8899 ins->type = cfg->r4_stack_type;
8900 ins->dreg = alloc_dreg (cfg, STACK_R8);
8902 MONO_ADD_INS (cfg->cbb, ins);
8912 gboolean use_aotconst = FALSE;
8914 #ifdef TARGET_POWERPC
8915 /* FIXME: Clean this up */
8916 if (cfg->compile_aot)
8917 use_aotconst = TRUE;
8920 /* FIXME: we should really allocate this only late in the compilation process */
8921 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8923 CHECK_STACK_OVF (1);
8929 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8931 dreg = alloc_freg (cfg);
8932 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8933 ins->type = STACK_R8;
8935 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8936 ins->type = STACK_R8;
8937 ins->dreg = alloc_dreg (cfg, STACK_R8);
8939 MONO_ADD_INS (cfg->cbb, ins);
8948 MonoInst *temp, *store;
8950 CHECK_STACK_OVF (1);
8954 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8955 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8957 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8960 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8973 if (sp [0]->type == STACK_R8)
8974 /* we need to pop the value from the x86 FP stack */
8975 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8980 MonoMethodSignature *fsig;
8983 INLINE_FAILURE ("jmp");
8984 GSHAREDVT_FAILURE (*ip);
8987 if (stack_start != sp)
8989 token = read32 (ip + 1);
8990 /* FIXME: check the signature matches */
8991 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8994 if (cfg->gshared && mono_method_check_context_used (cmethod))
8995 GENERIC_SHARING_FAILURE (CEE_JMP);
8997 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8999 fsig = mono_method_signature (cmethod);
9000 n = fsig->param_count + fsig->hasthis;
9001 if (cfg->llvm_only) {
9004 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9005 for (i = 0; i < n; ++i)
9006 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9007 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9009 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9010 * have to emit a normal return since llvm expects it.
9013 emit_setret (cfg, ins);
9014 MONO_INST_NEW (cfg, ins, OP_BR);
9015 ins->inst_target_bb = end_bblock;
9016 MONO_ADD_INS (cfg->cbb, ins);
9017 link_bblock (cfg, cfg->cbb, end_bblock);
9020 } else if (cfg->backend->have_op_tail_call) {
9021 /* Handle tail calls similarly to calls */
9024 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9025 call->method = cmethod;
9026 call->tail_call = TRUE;
9027 call->signature = mono_method_signature (cmethod);
9028 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9029 call->inst.inst_p0 = cmethod;
9030 for (i = 0; i < n; ++i)
9031 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9033 mono_arch_emit_call (cfg, call);
9034 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9035 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9037 for (i = 0; i < num_args; ++i)
9038 /* Prevent arguments from being optimized away */
9039 arg_array [i]->flags |= MONO_INST_VOLATILE;
9041 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9042 ins = (MonoInst*)call;
9043 ins->inst_p0 = cmethod;
9044 MONO_ADD_INS (cfg->cbb, ins);
9048 start_new_bblock = 1;
9053 MonoMethodSignature *fsig;
9056 token = read32 (ip + 1);
9060 //GSHAREDVT_FAILURE (*ip);
9065 fsig = mini_get_signature (method, token, generic_context);
9067 if (method->dynamic && fsig->pinvoke) {
9071 * This is a call through a function pointer using a pinvoke
9072 * signature. Have to create a wrapper and call that instead.
9073 * FIXME: This is very slow, need to create a wrapper at JIT time
9074 * instead based on the signature.
9076 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9077 EMIT_NEW_PCONST (cfg, args [1], fsig);
9079 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9082 n = fsig->param_count + fsig->hasthis;
9086 //g_assert (!virtual_ || fsig->hasthis);
9090 inline_costs += 10 * num_calls++;
9093 * Making generic calls out of gsharedvt methods.
9094 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9095 * patching gshared method addresses into a gsharedvt method.
9097 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9099 * We pass the address to the gsharedvt trampoline in the rgctx reg
9101 MonoInst *callee = addr;
9103 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9105 GSHAREDVT_FAILURE (*ip);
9109 GSHAREDVT_FAILURE (*ip);
9111 addr = emit_get_rgctx_sig (cfg, context_used,
9112 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9113 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9117 /* Prevent inlining of methods with indirect calls */
9118 INLINE_FAILURE ("indirect call");
9120 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9121 MonoJumpInfoType info_type;
9125 * Instead of emitting an indirect call, emit a direct call
9126 * with the contents of the aotconst as the patch info.
9128 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9129 info_type = (MonoJumpInfoType)addr->inst_c1;
9130 info_data = addr->inst_p0;
9132 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9133 info_data = addr->inst_right->inst_left;
9136 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9137 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9142 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9146 /* End of call, INS should contain the result of the call, if any */
9148 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9150 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9153 CHECK_CFG_EXCEPTION;
9157 constrained_class = NULL;
9161 case CEE_CALLVIRT: {
9162 MonoInst *addr = NULL;
9163 MonoMethodSignature *fsig = NULL;
9165 int virtual_ = *ip == CEE_CALLVIRT;
9166 gboolean pass_imt_from_rgctx = FALSE;
9167 MonoInst *imt_arg = NULL;
9168 MonoInst *keep_this_alive = NULL;
9169 gboolean pass_vtable = FALSE;
9170 gboolean pass_mrgctx = FALSE;
9171 MonoInst *vtable_arg = NULL;
9172 gboolean check_this = FALSE;
9173 gboolean supported_tail_call = FALSE;
9174 gboolean tail_call = FALSE;
9175 gboolean need_seq_point = FALSE;
9176 guint32 call_opcode = *ip;
9177 gboolean emit_widen = TRUE;
9178 gboolean push_res = TRUE;
9179 gboolean skip_ret = FALSE;
9180 gboolean delegate_invoke = FALSE;
9181 gboolean direct_icall = FALSE;
9182 gboolean constrained_partial_call = FALSE;
9183 MonoMethod *cil_method;
9186 token = read32 (ip + 1);
9190 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9193 cil_method = cmethod;
9195 if (constrained_class) {
9196 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9197 if (!mini_is_gsharedvt_klass (constrained_class)) {
9198 g_assert (!cmethod->klass->valuetype);
9199 if (!mini_type_is_reference (&constrained_class->byval_arg))
9200 constrained_partial_call = TRUE;
9204 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9205 if (cfg->verbose_level > 2)
9206 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9207 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9208 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9210 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9214 if (cfg->verbose_level > 2)
9215 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9217 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9219 * This is needed since get_method_constrained can't find
9220 * the method in klass representing a type var.
9221 * The type var is guaranteed to be a reference type in this
9224 if (!mini_is_gsharedvt_klass (constrained_class))
9225 g_assert (!cmethod->klass->valuetype);
9227 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9233 if (!cmethod || mono_loader_get_last_error ()) {
9234 if (mono_loader_get_last_error ()) {
9235 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9236 mono_error_set_from_loader_error (&cfg->error);
9242 if (!dont_verify && !cfg->skip_visibility) {
9243 MonoMethod *target_method = cil_method;
9244 if (method->is_inflated) {
9245 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9248 if (!mono_method_can_access_method (method_definition, target_method) &&
9249 !mono_method_can_access_method (method, cil_method))
9250 METHOD_ACCESS_FAILURE (method, cil_method);
9253 if (mono_security_core_clr_enabled ())
9254 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9256 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9257 /* MS.NET seems to silently convert this to a callvirt */
9262 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9263 * converts to a callvirt.
9265 * tests/bug-515884.il is an example of this behavior
9267 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9268 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9269 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9273 if (!cmethod->klass->inited)
9274 if (!mono_class_init (cmethod->klass))
9275 TYPE_LOAD_ERROR (cmethod->klass);
9277 fsig = mono_method_signature (cmethod);
9280 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9281 mini_class_is_system_array (cmethod->klass)) {
9282 array_rank = cmethod->klass->rank;
9283 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9284 direct_icall = TRUE;
9285 } else if (fsig->pinvoke) {
9286 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9287 fsig = mono_method_signature (wrapper);
9288 } else if (constrained_class) {
9290 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9294 if (cfg->llvm_only && !cfg->method->wrapper_type)
9295 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9297 /* See code below */
9298 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9299 MonoBasicBlock *tbb;
9301 GET_BBLOCK (cfg, tbb, ip + 5);
9302 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9304 * We want to extend the try block to cover the call, but we can't do it if the
9305 * call is made directly since its followed by an exception check.
9307 direct_icall = FALSE;
9311 mono_save_token_info (cfg, image, token, cil_method);
9313 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9314 need_seq_point = TRUE;
9316 /* Don't support calls made using type arguments for now */
9318 if (cfg->gsharedvt) {
9319 if (mini_is_gsharedvt_signature (fsig))
9320 GSHAREDVT_FAILURE (*ip);
9324 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9325 g_assert_not_reached ();
9327 n = fsig->param_count + fsig->hasthis;
9329 if (!cfg->gshared && cmethod->klass->generic_container)
9333 g_assert (!mono_method_check_context_used (cmethod));
9337 //g_assert (!virtual_ || fsig->hasthis);
9342 * We have the `constrained.' prefix opcode.
9344 if (constrained_class) {
9345 if (mini_is_gsharedvt_klass (constrained_class)) {
9346 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9347 /* The 'Own method' case below */
9348 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9349 /* 'The type parameter is instantiated as a reference type' case below. */
9351 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9352 CHECK_CFG_EXCEPTION;
9358 if (constrained_partial_call) {
9359 gboolean need_box = TRUE;
9362 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9363 * called method is not known at compile time either. The called method could end up being
9364 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9365 * to box the receiver.
9366 * A simple solution would be to box always and make a normal virtual call, but that would
9367 * be bad performance wise.
9369 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9371 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9376 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9377 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9378 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9379 ins->klass = constrained_class;
9380 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9381 CHECK_CFG_EXCEPTION;
9382 } else if (need_box) {
9384 MonoBasicBlock *is_ref_bb, *end_bb;
9385 MonoInst *nonbox_call;
9388 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9390 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9391 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9393 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9395 NEW_BBLOCK (cfg, is_ref_bb);
9396 NEW_BBLOCK (cfg, end_bb);
9398 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9400 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9403 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9405 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9408 MONO_START_BB (cfg, is_ref_bb);
9409 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9410 ins->klass = constrained_class;
9411 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9412 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9414 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9416 MONO_START_BB (cfg, end_bb);
9419 nonbox_call->dreg = ins->dreg;
9422 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9423 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9424 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9427 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9429 * The type parameter is instantiated as a valuetype,
9430 * but that type doesn't override the method we're
9431 * calling, so we need to box `this'.
9433 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9434 ins->klass = constrained_class;
9435 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9436 CHECK_CFG_EXCEPTION;
9437 } else if (!constrained_class->valuetype) {
9438 int dreg = alloc_ireg_ref (cfg);
9441 * The type parameter is instantiated as a reference
9442 * type. We have a managed pointer on the stack, so
9443 * we need to dereference it here.
9445 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9446 ins->type = STACK_OBJ;
9449 if (cmethod->klass->valuetype) {
9452 /* Interface method */
9455 mono_class_setup_vtable (constrained_class);
9456 CHECK_TYPELOAD (constrained_class);
9457 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9459 TYPE_LOAD_ERROR (constrained_class);
9460 slot = mono_method_get_vtable_slot (cmethod);
9462 TYPE_LOAD_ERROR (cmethod->klass);
9463 cmethod = constrained_class->vtable [ioffset + slot];
9465 if (cmethod->klass == mono_defaults.enum_class) {
9466 /* Enum implements some interfaces, so treat this as the first case */
9467 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9468 ins->klass = constrained_class;
9469 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9470 CHECK_CFG_EXCEPTION;
9475 constrained_class = NULL;
9478 if (check_call_signature (cfg, fsig, sp))
9481 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9482 delegate_invoke = TRUE;
9484 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9485 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9486 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9494 * If the callee is a shared method, then its static cctor
9495 * might not get called after the call was patched.
9497 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9498 emit_class_init (cfg, cmethod->klass);
9499 CHECK_TYPELOAD (cmethod->klass);
9502 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9505 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9507 context_used = mini_method_check_context_used (cfg, cmethod);
9509 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9510 /* Generic method interface
9511 calls are resolved via a
9512 helper function and don't
9514 if (!cmethod_context || !cmethod_context->method_inst)
9515 pass_imt_from_rgctx = TRUE;
9519 * If a shared method calls another
9520 * shared method then the caller must
9521 * have a generic sharing context
9522 * because the magic trampoline
9523 * requires it. FIXME: We shouldn't
9524 * have to force the vtable/mrgctx
9525 * variable here. Instead there
9526 * should be a flag in the cfg to
9527 * request a generic sharing context.
9530 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9531 mono_get_vtable_var (cfg);
9536 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9538 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9540 CHECK_TYPELOAD (cmethod->klass);
9541 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9546 g_assert (!vtable_arg);
9548 if (!cfg->compile_aot) {
9550 * emit_get_rgctx_method () calls mono_class_vtable () so check
9551 * for type load errors before.
9553 mono_class_setup_vtable (cmethod->klass);
9554 CHECK_TYPELOAD (cmethod->klass);
9557 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9559 /* !marshalbyref is needed to properly handle generic methods + remoting */
9560 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9561 MONO_METHOD_IS_FINAL (cmethod)) &&
9562 !mono_class_is_marshalbyref (cmethod->klass)) {
9569 if (pass_imt_from_rgctx) {
9570 g_assert (!pass_vtable);
9572 imt_arg = emit_get_rgctx_method (cfg, context_used,
9573 cmethod, MONO_RGCTX_INFO_METHOD);
9577 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9579 /* Calling virtual generic methods */
9580 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9581 !(MONO_METHOD_IS_FINAL (cmethod) &&
9582 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9583 fsig->generic_param_count &&
9584 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9586 MonoInst *this_temp, *this_arg_temp, *store;
9587 MonoInst *iargs [4];
9589 g_assert (fsig->is_inflated);
9591 /* Prevent inlining of methods that contain indirect calls */
9592 INLINE_FAILURE ("virtual generic call");
9594 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9595 GSHAREDVT_FAILURE (*ip);
9597 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9598 g_assert (!imt_arg);
9600 g_assert (cmethod->is_inflated);
9601 imt_arg = emit_get_rgctx_method (cfg, context_used,
9602 cmethod, MONO_RGCTX_INFO_METHOD);
9603 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9605 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9606 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9607 MONO_ADD_INS (cfg->cbb, store);
9609 /* FIXME: This should be a managed pointer */
9610 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9612 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9613 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9614 cmethod, MONO_RGCTX_INFO_METHOD);
9615 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9616 addr = mono_emit_jit_icall (cfg,
9617 mono_helper_compile_generic_method, iargs);
9619 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9621 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9628 * Implement a workaround for the inherent races involved in locking:
9634 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9635 * try block, the Exit () won't be executed, see:
9636 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9637 * To work around this, we extend such try blocks to include the last x bytes
9638 * of the Monitor.Enter () call.
9640 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9641 MonoBasicBlock *tbb;
9643 GET_BBLOCK (cfg, tbb, ip + 5);
9645 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9646 * from Monitor.Enter like ArgumentNullException.
9648 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9649 /* Mark this bblock as needing to be extended */
9650 tbb->extend_try_block = TRUE;
9654 /* Conversion to a JIT intrinsic */
9655 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9656 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9657 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9664 if ((cfg->opt & MONO_OPT_INLINE) &&
9665 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9666 mono_method_check_inlining (cfg, cmethod)) {
9668 gboolean always = FALSE;
9670 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9671 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9672 /* Prevent inlining of methods that call wrappers */
9673 INLINE_FAILURE ("wrapper call");
9674 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9678 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9680 cfg->real_offset += 5;
9682 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9683 /* *sp is already set by inline_method */
9688 inline_costs += costs;
9694 /* Tail recursion elimination */
9695 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9696 gboolean has_vtargs = FALSE;
9699 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9700 INLINE_FAILURE ("tail call");
9702 /* keep it simple */
9703 for (i = fsig->param_count - 1; i >= 0; i--) {
9704 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9709 for (i = 0; i < n; ++i)
9710 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9711 MONO_INST_NEW (cfg, ins, OP_BR);
9712 MONO_ADD_INS (cfg->cbb, ins);
9713 tblock = start_bblock->out_bb [0];
9714 link_bblock (cfg, cfg->cbb, tblock);
9715 ins->inst_target_bb = tblock;
9716 start_new_bblock = 1;
9718 /* skip the CEE_RET, too */
9719 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9726 inline_costs += 10 * num_calls++;
9729 * Making generic calls out of gsharedvt methods.
9730 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9731 * patching gshared method addresses into a gsharedvt method.
9733 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9734 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9735 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9736 MonoRgctxInfoType info_type;
9739 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9740 //GSHAREDVT_FAILURE (*ip);
9741 // disable for possible remoting calls
9742 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9743 GSHAREDVT_FAILURE (*ip);
9744 if (fsig->generic_param_count) {
9745 /* virtual generic call */
9746 g_assert (!imt_arg);
9747 /* Same as the virtual generic case above */
9748 imt_arg = emit_get_rgctx_method (cfg, context_used,
9749 cmethod, MONO_RGCTX_INFO_METHOD);
9750 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9752 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9753 /* This can happen when we call a fully instantiated iface method */
9754 imt_arg = emit_get_rgctx_method (cfg, context_used,
9755 cmethod, MONO_RGCTX_INFO_METHOD);
9760 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9761 keep_this_alive = sp [0];
9763 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9764 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9766 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9767 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9769 if (cfg->llvm_only) {
9770 // FIXME: Avoid initializing vtable_arg
9771 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9773 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9778 /* Generic sharing */
9781 * Use this if the callee is gsharedvt sharable too, since
9782 * at runtime we might find an instantiation so the call cannot
9783 * be patched (the 'no_patch' code path in mini-trampolines.c).
9785 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9786 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9787 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9788 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9789 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9790 INLINE_FAILURE ("gshared");
9792 g_assert (cfg->gshared && cmethod);
9796 * We are compiling a call to a
9797 * generic method from shared code,
9798 * which means that we have to look up
9799 * the method in the rgctx and do an
9803 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9805 if (cfg->llvm_only) {
9806 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9807 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9809 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9810 // FIXME: Avoid initializing imt_arg/vtable_arg
9811 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9813 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9814 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9819 /* Direct calls to icalls */
9821 MonoMethod *wrapper;
9824 /* Inline the wrapper */
9825 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9827 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9828 g_assert (costs > 0);
9829 cfg->real_offset += 5;
9831 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9832 /* *sp is already set by inline_method */
9837 inline_costs += costs;
9846 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9847 MonoInst *val = sp [fsig->param_count];
9849 if (val->type == STACK_OBJ) {
9850 MonoInst *iargs [2];
9855 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9858 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9859 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9860 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9861 emit_write_barrier (cfg, addr, val);
9862 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9863 GSHAREDVT_FAILURE (*ip);
9864 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9865 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9867 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9868 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9869 if (!cmethod->klass->element_class->valuetype && !readonly)
9870 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9871 CHECK_TYPELOAD (cmethod->klass);
9874 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9877 g_assert_not_reached ();
9884 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9888 /* Tail prefix / tail call optimization */
9890 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9891 /* FIXME: runtime generic context pointer for jumps? */
9892 /* FIXME: handle this for generic sharing eventually */
9893 if ((ins_flag & MONO_INST_TAILCALL) &&
9894 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9895 supported_tail_call = TRUE;
9897 if (supported_tail_call) {
9900 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9901 INLINE_FAILURE ("tail call");
9903 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9905 if (cfg->backend->have_op_tail_call) {
9906 /* Handle tail calls similarly to normal calls */
9909 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9911 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9912 call->tail_call = TRUE;
9913 call->method = cmethod;
9914 call->signature = mono_method_signature (cmethod);
9917 * We implement tail calls by storing the actual arguments into the
9918 * argument variables, then emitting a CEE_JMP.
9920 for (i = 0; i < n; ++i) {
9921 /* Prevent argument from being register allocated */
9922 arg_array [i]->flags |= MONO_INST_VOLATILE;
9923 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9925 ins = (MonoInst*)call;
9926 ins->inst_p0 = cmethod;
9927 ins->inst_p1 = arg_array [0];
9928 MONO_ADD_INS (cfg->cbb, ins);
9929 link_bblock (cfg, cfg->cbb, end_bblock);
9930 start_new_bblock = 1;
9932 // FIXME: Eliminate unreachable epilogs
9935 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9936 * only reachable from this call.
9938 GET_BBLOCK (cfg, tblock, ip + 5);
9939 if (tblock == cfg->cbb || tblock->in_count == 0)
9948 * Synchronized wrappers.
9949 * Its hard to determine where to replace a method with its synchronized
9950 * wrapper without causing an infinite recursion. The current solution is
9951 * to add the synchronized wrapper in the trampolines, and to
9952 * change the called method to a dummy wrapper, and resolve that wrapper
9953 * to the real method in mono_jit_compile_method ().
9955 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9956 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9957 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9958 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9962 * Virtual calls in llvm-only mode.
9964 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9965 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9970 INLINE_FAILURE ("call");
9971 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9972 imt_arg, vtable_arg);
9974 if (tail_call && !cfg->llvm_only) {
9975 link_bblock (cfg, cfg->cbb, end_bblock);
9976 start_new_bblock = 1;
9978 // FIXME: Eliminate unreachable epilogs
9981 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9982 * only reachable from this call.
9984 GET_BBLOCK (cfg, tblock, ip + 5);
9985 if (tblock == cfg->cbb || tblock->in_count == 0)
9992 /* End of call, INS should contain the result of the call, if any */
9994 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9997 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10002 if (keep_this_alive) {
10003 MonoInst *dummy_use;
10005 /* See mono_emit_method_call_full () */
10006 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10009 CHECK_CFG_EXCEPTION;
10013 g_assert (*ip == CEE_RET);
10017 constrained_class = NULL;
10018 if (need_seq_point)
10019 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10023 if (cfg->method != method) {
10024 /* return from inlined method */
10026 * If in_count == 0, that means the ret is unreachable due to
10027 * being preceeded by a throw. In that case, inline_method () will
10028 * handle setting the return value
10029 * (test case: test_0_inline_throw ()).
10031 if (return_var && cfg->cbb->in_count) {
10032 MonoType *ret_type = mono_method_signature (method)->ret;
10038 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10041 //g_assert (returnvar != -1);
10042 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10043 cfg->ret_var_set = TRUE;
10046 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10048 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10049 emit_pop_lmf (cfg);
10052 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10054 if (seq_points && !sym_seq_points) {
10056 * Place a seq point here too even through the IL stack is not
10057 * empty, so a step over on
10060 * will work correctly.
10062 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10063 MONO_ADD_INS (cfg->cbb, ins);
10066 g_assert (!return_var);
10070 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10073 emit_setret (cfg, *sp);
10076 if (sp != stack_start)
10078 MONO_INST_NEW (cfg, ins, OP_BR);
10080 ins->inst_target_bb = end_bblock;
10081 MONO_ADD_INS (cfg->cbb, ins);
10082 link_bblock (cfg, cfg->cbb, end_bblock);
10083 start_new_bblock = 1;
10087 MONO_INST_NEW (cfg, ins, OP_BR);
10089 target = ip + 1 + (signed char)(*ip);
10091 GET_BBLOCK (cfg, tblock, target);
10092 link_bblock (cfg, cfg->cbb, tblock);
10093 ins->inst_target_bb = tblock;
10094 if (sp != stack_start) {
10095 handle_stack_args (cfg, stack_start, sp - stack_start);
10097 CHECK_UNVERIFIABLE (cfg);
10099 MONO_ADD_INS (cfg->cbb, ins);
10100 start_new_bblock = 1;
10101 inline_costs += BRANCH_COST;
10115 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10117 target = ip + 1 + *(signed char*)ip;
10120 ADD_BINCOND (NULL);
10123 inline_costs += BRANCH_COST;
10127 MONO_INST_NEW (cfg, ins, OP_BR);
10130 target = ip + 4 + (gint32)read32(ip);
10132 GET_BBLOCK (cfg, tblock, target);
10133 link_bblock (cfg, cfg->cbb, tblock);
10134 ins->inst_target_bb = tblock;
10135 if (sp != stack_start) {
10136 handle_stack_args (cfg, stack_start, sp - stack_start);
10138 CHECK_UNVERIFIABLE (cfg);
10141 MONO_ADD_INS (cfg->cbb, ins);
10143 start_new_bblock = 1;
10144 inline_costs += BRANCH_COST;
10146 case CEE_BRFALSE_S:
10151 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10152 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10153 guint32 opsize = is_short ? 1 : 4;
10155 CHECK_OPSIZE (opsize);
10157 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10160 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10165 GET_BBLOCK (cfg, tblock, target);
10166 link_bblock (cfg, cfg->cbb, tblock);
10167 GET_BBLOCK (cfg, tblock, ip);
10168 link_bblock (cfg, cfg->cbb, tblock);
10170 if (sp != stack_start) {
10171 handle_stack_args (cfg, stack_start, sp - stack_start);
10172 CHECK_UNVERIFIABLE (cfg);
10175 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10176 cmp->sreg1 = sp [0]->dreg;
10177 type_from_op (cfg, cmp, sp [0], NULL);
10180 #if SIZEOF_REGISTER == 4
10181 if (cmp->opcode == OP_LCOMPARE_IMM) {
10182 /* Convert it to OP_LCOMPARE */
10183 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10184 ins->type = STACK_I8;
10185 ins->dreg = alloc_dreg (cfg, STACK_I8);
10187 MONO_ADD_INS (cfg->cbb, ins);
10188 cmp->opcode = OP_LCOMPARE;
10189 cmp->sreg2 = ins->dreg;
10192 MONO_ADD_INS (cfg->cbb, cmp);
10194 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10195 type_from_op (cfg, ins, sp [0], NULL);
10196 MONO_ADD_INS (cfg->cbb, ins);
10197 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10198 GET_BBLOCK (cfg, tblock, target);
10199 ins->inst_true_bb = tblock;
10200 GET_BBLOCK (cfg, tblock, ip);
10201 ins->inst_false_bb = tblock;
10202 start_new_bblock = 2;
10205 inline_costs += BRANCH_COST;
10220 MONO_INST_NEW (cfg, ins, *ip);
10222 target = ip + 4 + (gint32)read32(ip);
10225 ADD_BINCOND (NULL);
10228 inline_costs += BRANCH_COST;
10232 MonoBasicBlock **targets;
10233 MonoBasicBlock *default_bblock;
10234 MonoJumpInfoBBTable *table;
10235 int offset_reg = alloc_preg (cfg);
10236 int target_reg = alloc_preg (cfg);
10237 int table_reg = alloc_preg (cfg);
10238 int sum_reg = alloc_preg (cfg);
10239 gboolean use_op_switch;
10243 n = read32 (ip + 1);
10246 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10250 CHECK_OPSIZE (n * sizeof (guint32));
10251 target = ip + n * sizeof (guint32);
10253 GET_BBLOCK (cfg, default_bblock, target);
10254 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10256 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10257 for (i = 0; i < n; ++i) {
10258 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10259 targets [i] = tblock;
10260 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10264 if (sp != stack_start) {
10266 * Link the current bb with the targets as well, so handle_stack_args
10267 * will set their in_stack correctly.
10269 link_bblock (cfg, cfg->cbb, default_bblock);
10270 for (i = 0; i < n; ++i)
10271 link_bblock (cfg, cfg->cbb, targets [i]);
10273 handle_stack_args (cfg, stack_start, sp - stack_start);
10275 CHECK_UNVERIFIABLE (cfg);
10277 /* Undo the links */
10278 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10279 for (i = 0; i < n; ++i)
10280 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10283 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10286 for (i = 0; i < n; ++i)
10287 link_bblock (cfg, cfg->cbb, targets [i]);
10289 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10290 table->table = targets;
10291 table->table_size = n;
10293 use_op_switch = FALSE;
10295 /* ARM implements SWITCH statements differently */
10296 /* FIXME: Make it use the generic implementation */
10297 if (!cfg->compile_aot)
10298 use_op_switch = TRUE;
10301 if (COMPILE_LLVM (cfg))
10302 use_op_switch = TRUE;
10304 cfg->cbb->has_jump_table = 1;
10306 if (use_op_switch) {
10307 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10308 ins->sreg1 = src1->dreg;
10309 ins->inst_p0 = table;
10310 ins->inst_many_bb = targets;
10311 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10312 MONO_ADD_INS (cfg->cbb, ins);
10314 if (sizeof (gpointer) == 8)
10315 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10319 #if SIZEOF_REGISTER == 8
10320 /* The upper word might not be zero, and we add it to a 64 bit address later */
10321 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10324 if (cfg->compile_aot) {
10325 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10327 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10328 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10329 ins->inst_p0 = table;
10330 ins->dreg = table_reg;
10331 MONO_ADD_INS (cfg->cbb, ins);
10334 /* FIXME: Use load_memindex */
10335 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10337 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10339 start_new_bblock = 1;
10340 inline_costs += (BRANCH_COST * 2);
10353 case CEE_LDIND_REF:
10360 dreg = alloc_freg (cfg);
10363 dreg = alloc_lreg (cfg);
10365 case CEE_LDIND_REF:
10366 dreg = alloc_ireg_ref (cfg);
10369 dreg = alloc_preg (cfg);
10372 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10373 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10374 if (*ip == CEE_LDIND_R4)
10375 ins->type = cfg->r4_stack_type;
10376 ins->flags |= ins_flag;
10377 MONO_ADD_INS (cfg->cbb, ins);
10379 if (ins_flag & MONO_INST_VOLATILE) {
10380 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10381 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10386 case CEE_STIND_REF:
10397 if (ins_flag & MONO_INST_VOLATILE) {
10398 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10399 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10402 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10403 ins->flags |= ins_flag;
10406 MONO_ADD_INS (cfg->cbb, ins);
10408 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10409 emit_write_barrier (cfg, sp [0], sp [1]);
10418 MONO_INST_NEW (cfg, ins, (*ip));
10420 ins->sreg1 = sp [0]->dreg;
10421 ins->sreg2 = sp [1]->dreg;
10422 type_from_op (cfg, ins, sp [0], sp [1]);
10424 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10426 /* Use the immediate opcodes if possible */
10427 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10428 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10429 if (imm_opcode != -1) {
10430 ins->opcode = imm_opcode;
10431 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10434 NULLIFY_INS (sp [1]);
10438 MONO_ADD_INS ((cfg)->cbb, (ins));
10440 *sp++ = mono_decompose_opcode (cfg, ins);
10457 MONO_INST_NEW (cfg, ins, (*ip));
10459 ins->sreg1 = sp [0]->dreg;
10460 ins->sreg2 = sp [1]->dreg;
10461 type_from_op (cfg, ins, sp [0], sp [1]);
10463 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10464 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10466 /* FIXME: Pass opcode to is_inst_imm */
10468 /* Use the immediate opcodes if possible */
10469 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10470 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10471 if (imm_opcode != -1) {
10472 ins->opcode = imm_opcode;
10473 if (sp [1]->opcode == OP_I8CONST) {
10474 #if SIZEOF_REGISTER == 8
10475 ins->inst_imm = sp [1]->inst_l;
10477 ins->inst_ls_word = sp [1]->inst_ls_word;
10478 ins->inst_ms_word = sp [1]->inst_ms_word;
10482 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10485 /* Might be followed by an instruction added by add_widen_op */
10486 if (sp [1]->next == NULL)
10487 NULLIFY_INS (sp [1]);
10490 MONO_ADD_INS ((cfg)->cbb, (ins));
10492 *sp++ = mono_decompose_opcode (cfg, ins);
10505 case CEE_CONV_OVF_I8:
10506 case CEE_CONV_OVF_U8:
10507 case CEE_CONV_R_UN:
10510 /* Special case this earlier so we have long constants in the IR */
10511 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10512 int data = sp [-1]->inst_c0;
10513 sp [-1]->opcode = OP_I8CONST;
10514 sp [-1]->type = STACK_I8;
10515 #if SIZEOF_REGISTER == 8
10516 if ((*ip) == CEE_CONV_U8)
10517 sp [-1]->inst_c0 = (guint32)data;
10519 sp [-1]->inst_c0 = data;
10521 sp [-1]->inst_ls_word = data;
10522 if ((*ip) == CEE_CONV_U8)
10523 sp [-1]->inst_ms_word = 0;
10525 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10527 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10534 case CEE_CONV_OVF_I4:
10535 case CEE_CONV_OVF_I1:
10536 case CEE_CONV_OVF_I2:
10537 case CEE_CONV_OVF_I:
10538 case CEE_CONV_OVF_U:
10541 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10542 ADD_UNOP (CEE_CONV_OVF_I8);
10549 case CEE_CONV_OVF_U1:
10550 case CEE_CONV_OVF_U2:
10551 case CEE_CONV_OVF_U4:
10554 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10555 ADD_UNOP (CEE_CONV_OVF_U8);
10562 case CEE_CONV_OVF_I1_UN:
10563 case CEE_CONV_OVF_I2_UN:
10564 case CEE_CONV_OVF_I4_UN:
10565 case CEE_CONV_OVF_I8_UN:
10566 case CEE_CONV_OVF_U1_UN:
10567 case CEE_CONV_OVF_U2_UN:
10568 case CEE_CONV_OVF_U4_UN:
10569 case CEE_CONV_OVF_U8_UN:
10570 case CEE_CONV_OVF_I_UN:
10571 case CEE_CONV_OVF_U_UN:
10578 CHECK_CFG_EXCEPTION;
10582 case CEE_ADD_OVF_UN:
10584 case CEE_MUL_OVF_UN:
10586 case CEE_SUB_OVF_UN:
10592 GSHAREDVT_FAILURE (*ip);
10595 token = read32 (ip + 1);
10596 klass = mini_get_class (method, token, generic_context);
10597 CHECK_TYPELOAD (klass);
10599 if (generic_class_is_reference_type (cfg, klass)) {
10600 MonoInst *store, *load;
10601 int dreg = alloc_ireg_ref (cfg);
10603 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10604 load->flags |= ins_flag;
10605 MONO_ADD_INS (cfg->cbb, load);
10607 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10608 store->flags |= ins_flag;
10609 MONO_ADD_INS (cfg->cbb, store);
10611 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10612 emit_write_barrier (cfg, sp [0], sp [1]);
10614 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10620 int loc_index = -1;
10626 token = read32 (ip + 1);
10627 klass = mini_get_class (method, token, generic_context);
10628 CHECK_TYPELOAD (klass);
10630 /* Optimize the common ldobj+stloc combination */
10633 loc_index = ip [6];
10640 loc_index = ip [5] - CEE_STLOC_0;
10647 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10648 CHECK_LOCAL (loc_index);
10650 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10651 ins->dreg = cfg->locals [loc_index]->dreg;
10652 ins->flags |= ins_flag;
10655 if (ins_flag & MONO_INST_VOLATILE) {
10656 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10657 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10663 /* Optimize the ldobj+stobj combination */
10664 /* The reference case ends up being a load+store anyway */
10665 /* Skip this if the operation is volatile. */
10666 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10671 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10678 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10679 ins->flags |= ins_flag;
10682 if (ins_flag & MONO_INST_VOLATILE) {
10683 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10684 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10693 CHECK_STACK_OVF (1);
10695 n = read32 (ip + 1);
10697 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10698 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10699 ins->type = STACK_OBJ;
10702 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10703 MonoInst *iargs [1];
10704 char *str = (char *)mono_method_get_wrapper_data (method, n);
10706 if (cfg->compile_aot)
10707 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10709 EMIT_NEW_PCONST (cfg, iargs [0], str);
10710 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10712 if (cfg->opt & MONO_OPT_SHARED) {
10713 MonoInst *iargs [3];
10715 if (cfg->compile_aot) {
10716 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10718 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10719 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10720 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10721 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10722 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10724 if (cfg->cbb->out_of_line) {
10725 MonoInst *iargs [2];
10727 if (image == mono_defaults.corlib) {
10729 * Avoid relocations in AOT and save some space by using a
10730 * version of helper_ldstr specialized to mscorlib.
10732 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10733 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10735 /* Avoid creating the string object */
10736 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10737 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10738 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10742 if (cfg->compile_aot) {
10743 NEW_LDSTRCONST (cfg, ins, image, n);
10745 MONO_ADD_INS (cfg->cbb, ins);
10748 NEW_PCONST (cfg, ins, NULL);
10749 ins->type = STACK_OBJ;
10750 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10752 OUT_OF_MEMORY_FAILURE;
10755 MONO_ADD_INS (cfg->cbb, ins);
10764 MonoInst *iargs [2];
10765 MonoMethodSignature *fsig;
10768 MonoInst *vtable_arg = NULL;
10771 token = read32 (ip + 1);
10772 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10775 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10778 mono_save_token_info (cfg, image, token, cmethod);
10780 if (!mono_class_init (cmethod->klass))
10781 TYPE_LOAD_ERROR (cmethod->klass);
10783 context_used = mini_method_check_context_used (cfg, cmethod);
10785 if (mono_security_core_clr_enabled ())
10786 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10788 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10789 emit_class_init (cfg, cmethod->klass);
10790 CHECK_TYPELOAD (cmethod->klass);
10794 if (cfg->gsharedvt) {
10795 if (mini_is_gsharedvt_variable_signature (sig))
10796 GSHAREDVT_FAILURE (*ip);
10800 n = fsig->param_count;
10804 * Generate smaller code for the common newobj <exception> instruction in
10805 * argument checking code.
10807 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10808 is_exception_class (cmethod->klass) && n <= 2 &&
10809 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10810 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10811 MonoInst *iargs [3];
10815 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10818 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10821 iargs [1] = sp [0];
10822 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10825 iargs [1] = sp [0];
10826 iargs [2] = sp [1];
10827 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10830 g_assert_not_reached ();
10838 /* move the args to allow room for 'this' in the first position */
10844 /* check_call_signature () requires sp[0] to be set */
10845 this_ins.type = STACK_OBJ;
10846 sp [0] = &this_ins;
10847 if (check_call_signature (cfg, fsig, sp))
10852 if (mini_class_is_system_array (cmethod->klass)) {
10853 *sp = emit_get_rgctx_method (cfg, context_used,
10854 cmethod, MONO_RGCTX_INFO_METHOD);
10856 /* Avoid varargs in the common case */
10857 if (fsig->param_count == 1)
10858 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10859 else if (fsig->param_count == 2)
10860 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10861 else if (fsig->param_count == 3)
10862 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10863 else if (fsig->param_count == 4)
10864 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10866 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10867 } else if (cmethod->string_ctor) {
10868 g_assert (!context_used);
10869 g_assert (!vtable_arg);
10870 /* we simply pass a null pointer */
10871 EMIT_NEW_PCONST (cfg, *sp, NULL);
10872 /* now call the string ctor */
10873 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10875 if (cmethod->klass->valuetype) {
10876 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10877 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10878 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10883 * The code generated by mini_emit_virtual_call () expects
10884 * iargs [0] to be a boxed instance, but luckily the vcall
10885 * will be transformed into a normal call there.
10887 } else if (context_used) {
10888 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10891 MonoVTable *vtable = NULL;
10893 if (!cfg->compile_aot)
10894 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10895 CHECK_TYPELOAD (cmethod->klass);
10898 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10899 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10900 * As a workaround, we call class cctors before allocating objects.
10902 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10903 emit_class_init (cfg, cmethod->klass);
10904 if (cfg->verbose_level > 2)
10905 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10906 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10909 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10912 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10915 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10917 /* Now call the actual ctor */
10918 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10919 CHECK_CFG_EXCEPTION;
10922 if (alloc == NULL) {
10924 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10925 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10933 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10934 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10937 case CEE_CASTCLASS:
10941 token = read32 (ip + 1);
10942 klass = mini_get_class (method, token, generic_context);
10943 CHECK_TYPELOAD (klass);
10944 if (sp [0]->type != STACK_OBJ)
10947 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10948 CHECK_CFG_EXCEPTION;
10957 token = read32 (ip + 1);
10958 klass = mini_get_class (method, token, generic_context);
10959 CHECK_TYPELOAD (klass);
10960 if (sp [0]->type != STACK_OBJ)
10963 context_used = mini_class_check_context_used (cfg, klass);
10965 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10966 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10967 MonoInst *args [3];
10974 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10977 idx = get_castclass_cache_idx (cfg);
10978 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10980 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10983 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10984 MonoMethod *mono_isinst;
10985 MonoInst *iargs [1];
10988 mono_isinst = mono_marshal_get_isinst (klass);
10989 iargs [0] = sp [0];
10991 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10992 iargs, ip, cfg->real_offset, TRUE);
10993 CHECK_CFG_EXCEPTION;
10994 g_assert (costs > 0);
10997 cfg->real_offset += 5;
11001 inline_costs += costs;
11004 ins = handle_isinst (cfg, klass, *sp, context_used);
11005 CHECK_CFG_EXCEPTION;
11011 case CEE_UNBOX_ANY: {
11012 MonoInst *res, *addr;
11017 token = read32 (ip + 1);
11018 klass = mini_get_class (method, token, generic_context);
11019 CHECK_TYPELOAD (klass);
11021 mono_save_token_info (cfg, image, token, klass);
11023 context_used = mini_class_check_context_used (cfg, klass);
11025 if (mini_is_gsharedvt_klass (klass)) {
11026 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11028 } else if (generic_class_is_reference_type (cfg, klass)) {
11029 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11030 CHECK_CFG_EXCEPTION;
11031 } else if (mono_class_is_nullable (klass)) {
11032 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11034 addr = handle_unbox (cfg, klass, sp, context_used);
11036 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11047 MonoClass *enum_class;
11048 MonoMethod *has_flag;
11054 token = read32 (ip + 1);
11055 klass = mini_get_class (method, token, generic_context);
11056 CHECK_TYPELOAD (klass);
11058 mono_save_token_info (cfg, image, token, klass);
11060 context_used = mini_class_check_context_used (cfg, klass);
11062 if (generic_class_is_reference_type (cfg, klass)) {
11068 if (klass == mono_defaults.void_class)
11070 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11072 /* frequent check in generic code: box (struct), brtrue */
11077 * <push int/long ptr>
11080 * constrained. MyFlags
11081 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11083 * If we find this sequence and the operand types on box and constrained
11084 * are equal, we can emit a specialized instruction sequence instead of
11085 * the very slow HasFlag () call.
11087 if ((cfg->opt & MONO_OPT_INTRINS) &&
11088 /* Cheap checks first. */
11089 ip + 5 + 6 + 5 < end &&
11090 ip [5] == CEE_PREFIX1 &&
11091 ip [6] == CEE_CONSTRAINED_ &&
11092 ip [11] == CEE_CALLVIRT &&
11093 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11094 mono_class_is_enum (klass) &&
11095 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11096 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11097 has_flag->klass == mono_defaults.enum_class &&
11098 !strcmp (has_flag->name, "HasFlag") &&
11099 has_flag->signature->hasthis &&
11100 has_flag->signature->param_count == 1) {
11101 CHECK_TYPELOAD (enum_class);
11103 if (enum_class == klass) {
11104 MonoInst *enum_this, *enum_flag;
11109 enum_this = sp [0];
11110 enum_flag = sp [1];
11112 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11117 // FIXME: LLVM can't handle the inconsistent bb linking
11118 if (!mono_class_is_nullable (klass) &&
11119 !mini_is_gsharedvt_klass (klass) &&
11120 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11121 (ip [5] == CEE_BRTRUE ||
11122 ip [5] == CEE_BRTRUE_S ||
11123 ip [5] == CEE_BRFALSE ||
11124 ip [5] == CEE_BRFALSE_S)) {
11125 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11127 MonoBasicBlock *true_bb, *false_bb;
11131 if (cfg->verbose_level > 3) {
11132 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11133 printf ("<box+brtrue opt>\n");
11138 case CEE_BRFALSE_S:
11141 target = ip + 1 + (signed char)(*ip);
11148 target = ip + 4 + (gint)(read32 (ip));
11152 g_assert_not_reached ();
11156 * We need to link both bblocks, since it is needed for handling stack
11157 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11158 * Branching to only one of them would lead to inconsistencies, so
11159 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11161 GET_BBLOCK (cfg, true_bb, target);
11162 GET_BBLOCK (cfg, false_bb, ip);
11164 mono_link_bblock (cfg, cfg->cbb, true_bb);
11165 mono_link_bblock (cfg, cfg->cbb, false_bb);
11167 if (sp != stack_start) {
11168 handle_stack_args (cfg, stack_start, sp - stack_start);
11170 CHECK_UNVERIFIABLE (cfg);
11173 if (COMPILE_LLVM (cfg)) {
11174 dreg = alloc_ireg (cfg);
11175 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11178 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11180 /* The JIT can't eliminate the iconst+compare */
11181 MONO_INST_NEW (cfg, ins, OP_BR);
11182 ins->inst_target_bb = is_true ? true_bb : false_bb;
11183 MONO_ADD_INS (cfg->cbb, ins);
11186 start_new_bblock = 1;
11190 *sp++ = handle_box (cfg, val, klass, context_used);
11192 CHECK_CFG_EXCEPTION;
11201 token = read32 (ip + 1);
11202 klass = mini_get_class (method, token, generic_context);
11203 CHECK_TYPELOAD (klass);
11205 mono_save_token_info (cfg, image, token, klass);
11207 context_used = mini_class_check_context_used (cfg, klass);
11209 if (mono_class_is_nullable (klass)) {
11212 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11213 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11217 ins = handle_unbox (cfg, klass, sp, context_used);
11230 MonoClassField *field;
11231 #ifndef DISABLE_REMOTING
11235 gboolean is_instance;
11237 gpointer addr = NULL;
11238 gboolean is_special_static;
11240 MonoInst *store_val = NULL;
11241 MonoInst *thread_ins;
11244 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11246 if (op == CEE_STFLD) {
11249 store_val = sp [1];
11254 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11256 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11259 if (op == CEE_STSFLD) {
11262 store_val = sp [0];
11267 token = read32 (ip + 1);
11268 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11269 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11270 klass = field->parent;
11273 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11276 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11277 FIELD_ACCESS_FAILURE (method, field);
11278 mono_class_init (klass);
11280 /* if the class is Critical then transparent code cannot access its fields */
11281 if (!is_instance && mono_security_core_clr_enabled ())
11282 ensure_method_is_allowed_to_access_field (cfg, method, field);
11284 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11285 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11286 if (mono_security_core_clr_enabled ())
11287 ensure_method_is_allowed_to_access_field (cfg, method, field);
11290 ftype = mono_field_get_type (field);
11293 * LDFLD etc. is usable on static fields as well, so convert those cases to
11296 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11308 g_assert_not_reached ();
11310 is_instance = FALSE;
11313 context_used = mini_class_check_context_used (cfg, klass);
11315 /* INSTANCE CASE */
11317 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11318 if (op == CEE_STFLD) {
11319 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11321 #ifndef DISABLE_REMOTING
11322 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11323 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11324 MonoInst *iargs [5];
11326 GSHAREDVT_FAILURE (op);
11328 iargs [0] = sp [0];
11329 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11330 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11331 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11333 iargs [4] = sp [1];
11335 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11336 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11337 iargs, ip, cfg->real_offset, TRUE);
11338 CHECK_CFG_EXCEPTION;
11339 g_assert (costs > 0);
11341 cfg->real_offset += 5;
11343 inline_costs += costs;
11345 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11352 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11354 if (mini_is_gsharedvt_klass (klass)) {
11355 MonoInst *offset_ins;
11357 context_used = mini_class_check_context_used (cfg, klass);
11359 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11360 /* The value is offset by 1 */
11361 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11362 dreg = alloc_ireg_mp (cfg);
11363 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11364 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11365 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11367 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11369 if (sp [0]->opcode != OP_LDADDR)
11370 store->flags |= MONO_INST_FAULT;
11372 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11373 /* insert call to write barrier */
11377 dreg = alloc_ireg_mp (cfg);
11378 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11379 emit_write_barrier (cfg, ptr, sp [1]);
11382 store->flags |= ins_flag;
11389 #ifndef DISABLE_REMOTING
11390 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11391 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11392 MonoInst *iargs [4];
11394 GSHAREDVT_FAILURE (op);
11396 iargs [0] = sp [0];
11397 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11398 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11399 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11400 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11401 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11402 iargs, ip, cfg->real_offset, TRUE);
11403 CHECK_CFG_EXCEPTION;
11404 g_assert (costs > 0);
11406 cfg->real_offset += 5;
11410 inline_costs += costs;
11412 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11418 if (sp [0]->type == STACK_VTYPE) {
11421 /* Have to compute the address of the variable */
11423 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11425 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11427 g_assert (var->klass == klass);
11429 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11433 if (op == CEE_LDFLDA) {
11434 if (sp [0]->type == STACK_OBJ) {
11435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11436 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11439 dreg = alloc_ireg_mp (cfg);
11441 if (mini_is_gsharedvt_klass (klass)) {
11442 MonoInst *offset_ins;
11444 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11445 /* The value is offset by 1 */
11446 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11447 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11449 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11451 ins->klass = mono_class_from_mono_type (field->type);
11452 ins->type = STACK_MP;
11457 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11459 if (mini_is_gsharedvt_klass (klass)) {
11460 MonoInst *offset_ins;
11462 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11463 /* The value is offset by 1 */
11464 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11465 dreg = alloc_ireg_mp (cfg);
11466 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11467 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11469 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11471 load->flags |= ins_flag;
11472 if (sp [0]->opcode != OP_LDADDR)
11473 load->flags |= MONO_INST_FAULT;
11485 context_used = mini_class_check_context_used (cfg, klass);
11487 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11490 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11491 * to be called here.
11493 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11494 mono_class_vtable (cfg->domain, klass);
11495 CHECK_TYPELOAD (klass);
11497 mono_domain_lock (cfg->domain);
11498 if (cfg->domain->special_static_fields)
11499 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11500 mono_domain_unlock (cfg->domain);
11502 is_special_static = mono_class_field_is_special_static (field);
11504 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11505 thread_ins = mono_get_thread_intrinsic (cfg);
11509 /* Generate IR to compute the field address */
11510 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11512 * Fast access to TLS data
11513 * Inline version of get_thread_static_data () in
11517 int idx, static_data_reg, array_reg, dreg;
11519 GSHAREDVT_FAILURE (op);
11521 MONO_ADD_INS (cfg->cbb, thread_ins);
11522 static_data_reg = alloc_ireg (cfg);
11523 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11525 if (cfg->compile_aot) {
11526 int offset_reg, offset2_reg, idx_reg;
11528 /* For TLS variables, this will return the TLS offset */
11529 EMIT_NEW_SFLDACONST (cfg, ins, field);
11530 offset_reg = ins->dreg;
11531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11532 idx_reg = alloc_ireg (cfg);
11533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11535 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11536 array_reg = alloc_ireg (cfg);
11537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11538 offset2_reg = alloc_ireg (cfg);
11539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11541 dreg = alloc_ireg (cfg);
11542 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11544 offset = (gsize)addr & 0x7fffffff;
11545 idx = offset & 0x3f;
11547 array_reg = alloc_ireg (cfg);
11548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11549 dreg = alloc_ireg (cfg);
11550 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11552 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11553 (cfg->compile_aot && is_special_static) ||
11554 (context_used && is_special_static)) {
11555 MonoInst *iargs [2];
11557 g_assert (field->parent);
11558 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11559 if (context_used) {
11560 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11561 field, MONO_RGCTX_INFO_CLASS_FIELD);
11563 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11565 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11566 } else if (context_used) {
11567 MonoInst *static_data;
11570 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11571 method->klass->name_space, method->klass->name, method->name,
11572 depth, field->offset);
11575 if (mono_class_needs_cctor_run (klass, method))
11576 emit_class_init (cfg, klass);
11579 * The pointer we're computing here is
11581 * super_info.static_data + field->offset
11583 static_data = emit_get_rgctx_klass (cfg, context_used,
11584 klass, MONO_RGCTX_INFO_STATIC_DATA);
11586 if (mini_is_gsharedvt_klass (klass)) {
11587 MonoInst *offset_ins;
11589 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11590 /* The value is offset by 1 */
11591 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11592 dreg = alloc_ireg_mp (cfg);
11593 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11594 } else if (field->offset == 0) {
11597 int addr_reg = mono_alloc_preg (cfg);
11598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11600 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11601 MonoInst *iargs [2];
11603 g_assert (field->parent);
11604 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11605 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11606 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11608 MonoVTable *vtable = NULL;
11610 if (!cfg->compile_aot)
11611 vtable = mono_class_vtable (cfg->domain, klass);
11612 CHECK_TYPELOAD (klass);
11615 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11616 if (!(g_slist_find (class_inits, klass))) {
11617 emit_class_init (cfg, klass);
11618 if (cfg->verbose_level > 2)
11619 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11620 class_inits = g_slist_prepend (class_inits, klass);
11623 if (cfg->run_cctors) {
11625 /* This makes so that inline cannot trigger */
11626 /* .cctors: too many apps depend on them */
11627 /* running with a specific order... */
11629 if (! vtable->initialized)
11630 INLINE_FAILURE ("class init");
11631 ex = mono_runtime_class_init_full (vtable, FALSE);
11633 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11634 mono_error_set_exception_instance (&cfg->error, ex);
11635 g_assert_not_reached ();
11636 goto exception_exit;
11640 if (cfg->compile_aot)
11641 EMIT_NEW_SFLDACONST (cfg, ins, field);
11644 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11646 EMIT_NEW_PCONST (cfg, ins, addr);
11649 MonoInst *iargs [1];
11650 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11651 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11655 /* Generate IR to do the actual load/store operation */
11657 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11658 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11659 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11662 if (op == CEE_LDSFLDA) {
11663 ins->klass = mono_class_from_mono_type (ftype);
11664 ins->type = STACK_PTR;
11666 } else if (op == CEE_STSFLD) {
11669 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11670 store->flags |= ins_flag;
11672 gboolean is_const = FALSE;
11673 MonoVTable *vtable = NULL;
11674 gpointer addr = NULL;
11676 if (!context_used) {
11677 vtable = mono_class_vtable (cfg->domain, klass);
11678 CHECK_TYPELOAD (klass);
11680 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11681 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11682 int ro_type = ftype->type;
11684 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11685 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11686 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11689 GSHAREDVT_FAILURE (op);
11691 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11694 case MONO_TYPE_BOOLEAN:
11696 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11700 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11703 case MONO_TYPE_CHAR:
11705 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11709 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11714 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11718 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11723 case MONO_TYPE_PTR:
11724 case MONO_TYPE_FNPTR:
11725 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11726 type_to_eval_stack_type ((cfg), field->type, *sp);
11729 case MONO_TYPE_STRING:
11730 case MONO_TYPE_OBJECT:
11731 case MONO_TYPE_CLASS:
11732 case MONO_TYPE_SZARRAY:
11733 case MONO_TYPE_ARRAY:
11734 if (!mono_gc_is_moving ()) {
11735 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11736 type_to_eval_stack_type ((cfg), field->type, *sp);
11744 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11749 case MONO_TYPE_VALUETYPE:
11759 CHECK_STACK_OVF (1);
11761 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11762 load->flags |= ins_flag;
11768 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11769 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11770 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11781 token = read32 (ip + 1);
11782 klass = mini_get_class (method, token, generic_context);
11783 CHECK_TYPELOAD (klass);
11784 if (ins_flag & MONO_INST_VOLATILE) {
11785 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11786 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11788 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11789 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11790 ins->flags |= ins_flag;
11791 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11792 generic_class_is_reference_type (cfg, klass)) {
11793 /* insert call to write barrier */
11794 emit_write_barrier (cfg, sp [0], sp [1]);
11806 const char *data_ptr;
11808 guint32 field_token;
11814 token = read32 (ip + 1);
11816 klass = mini_get_class (method, token, generic_context);
11817 CHECK_TYPELOAD (klass);
11819 context_used = mini_class_check_context_used (cfg, klass);
11821 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11822 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11823 ins->sreg1 = sp [0]->dreg;
11824 ins->type = STACK_I4;
11825 ins->dreg = alloc_ireg (cfg);
11826 MONO_ADD_INS (cfg->cbb, ins);
11827 *sp = mono_decompose_opcode (cfg, ins);
11830 if (context_used) {
11831 MonoInst *args [3];
11832 MonoClass *array_class = mono_array_class_get (klass, 1);
11833 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11835 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11838 args [0] = emit_get_rgctx_klass (cfg, context_used,
11839 array_class, MONO_RGCTX_INFO_VTABLE);
11844 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11846 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11848 if (cfg->opt & MONO_OPT_SHARED) {
11849 /* Decompose now to avoid problems with references to the domainvar */
11850 MonoInst *iargs [3];
11852 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11853 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11854 iargs [2] = sp [0];
11856 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11858 /* Decompose later since it is needed by abcrem */
11859 MonoClass *array_type = mono_array_class_get (klass, 1);
11860 mono_class_vtable (cfg->domain, array_type);
11861 CHECK_TYPELOAD (array_type);
11863 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11864 ins->dreg = alloc_ireg_ref (cfg);
11865 ins->sreg1 = sp [0]->dreg;
11866 ins->inst_newa_class = klass;
11867 ins->type = STACK_OBJ;
11868 ins->klass = array_type;
11869 MONO_ADD_INS (cfg->cbb, ins);
11870 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11871 cfg->cbb->has_array_access = TRUE;
11873 /* Needed so mono_emit_load_get_addr () gets called */
11874 mono_get_got_var (cfg);
11884 * we inline/optimize the initialization sequence if possible.
11885 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11886 * for small sizes open code the memcpy
11887 * ensure the rva field is big enough
11889 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11890 MonoMethod *memcpy_method = get_memcpy_method ();
11891 MonoInst *iargs [3];
11892 int add_reg = alloc_ireg_mp (cfg);
11894 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11895 if (cfg->compile_aot) {
11896 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11898 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11900 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11901 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11910 if (sp [0]->type != STACK_OBJ)
11913 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11914 ins->dreg = alloc_preg (cfg);
11915 ins->sreg1 = sp [0]->dreg;
11916 ins->type = STACK_I4;
11917 /* This flag will be inherited by the decomposition */
11918 ins->flags |= MONO_INST_FAULT;
11919 MONO_ADD_INS (cfg->cbb, ins);
11920 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11921 cfg->cbb->has_array_access = TRUE;
11929 if (sp [0]->type != STACK_OBJ)
11932 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11934 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11935 CHECK_TYPELOAD (klass);
11936 /* we need to make sure that this array is exactly the type it needs
11937 * to be for correctness. the wrappers are lax with their usage
11938 * so we need to ignore them here
11940 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11941 MonoClass *array_class = mono_array_class_get (klass, 1);
11942 mini_emit_check_array_type (cfg, sp [0], array_class);
11943 CHECK_TYPELOAD (array_class);
11947 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11952 case CEE_LDELEM_I1:
11953 case CEE_LDELEM_U1:
11954 case CEE_LDELEM_I2:
11955 case CEE_LDELEM_U2:
11956 case CEE_LDELEM_I4:
11957 case CEE_LDELEM_U4:
11958 case CEE_LDELEM_I8:
11960 case CEE_LDELEM_R4:
11961 case CEE_LDELEM_R8:
11962 case CEE_LDELEM_REF: {
11968 if (*ip == CEE_LDELEM) {
11970 token = read32 (ip + 1);
11971 klass = mini_get_class (method, token, generic_context);
11972 CHECK_TYPELOAD (klass);
11973 mono_class_init (klass);
11976 klass = array_access_to_klass (*ip);
11978 if (sp [0]->type != STACK_OBJ)
11981 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11983 if (mini_is_gsharedvt_variable_klass (klass)) {
11984 // FIXME-VT: OP_ICONST optimization
11985 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11987 ins->opcode = OP_LOADV_MEMBASE;
11988 } else if (sp [1]->opcode == OP_ICONST) {
11989 int array_reg = sp [0]->dreg;
11990 int index_reg = sp [1]->dreg;
11991 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11993 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11994 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11996 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11999 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12000 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12003 if (*ip == CEE_LDELEM)
12010 case CEE_STELEM_I1:
12011 case CEE_STELEM_I2:
12012 case CEE_STELEM_I4:
12013 case CEE_STELEM_I8:
12014 case CEE_STELEM_R4:
12015 case CEE_STELEM_R8:
12016 case CEE_STELEM_REF:
12021 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12023 if (*ip == CEE_STELEM) {
12025 token = read32 (ip + 1);
12026 klass = mini_get_class (method, token, generic_context);
12027 CHECK_TYPELOAD (klass);
12028 mono_class_init (klass);
12031 klass = array_access_to_klass (*ip);
12033 if (sp [0]->type != STACK_OBJ)
12036 emit_array_store (cfg, klass, sp, TRUE);
12038 if (*ip == CEE_STELEM)
12045 case CEE_CKFINITE: {
12049 if (cfg->llvm_only) {
12050 MonoInst *iargs [1];
12052 iargs [0] = sp [0];
12053 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12055 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12056 ins->sreg1 = sp [0]->dreg;
12057 ins->dreg = alloc_freg (cfg);
12058 ins->type = STACK_R8;
12059 MONO_ADD_INS (cfg->cbb, ins);
12061 *sp++ = mono_decompose_opcode (cfg, ins);
12067 case CEE_REFANYVAL: {
12068 MonoInst *src_var, *src;
12070 int klass_reg = alloc_preg (cfg);
12071 int dreg = alloc_preg (cfg);
12073 GSHAREDVT_FAILURE (*ip);
12076 MONO_INST_NEW (cfg, ins, *ip);
12079 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12080 CHECK_TYPELOAD (klass);
12082 context_used = mini_class_check_context_used (cfg, klass);
12085 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12087 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12088 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12091 if (context_used) {
12092 MonoInst *klass_ins;
12094 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12095 klass, MONO_RGCTX_INFO_KLASS);
12098 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12099 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12101 mini_emit_class_check (cfg, klass_reg, klass);
12103 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12104 ins->type = STACK_MP;
12105 ins->klass = klass;
12110 case CEE_MKREFANY: {
12111 MonoInst *loc, *addr;
12113 GSHAREDVT_FAILURE (*ip);
12116 MONO_INST_NEW (cfg, ins, *ip);
12119 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12120 CHECK_TYPELOAD (klass);
12122 context_used = mini_class_check_context_used (cfg, klass);
12124 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12125 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12127 if (context_used) {
12128 MonoInst *const_ins;
12129 int type_reg = alloc_preg (cfg);
12131 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12135 } else if (cfg->compile_aot) {
12136 int const_reg = alloc_preg (cfg);
12137 int type_reg = alloc_preg (cfg);
12139 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12142 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12144 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12145 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12149 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12150 ins->type = STACK_VTYPE;
12151 ins->klass = mono_defaults.typed_reference_class;
12156 case CEE_LDTOKEN: {
12158 MonoClass *handle_class;
12160 CHECK_STACK_OVF (1);
12163 n = read32 (ip + 1);
12165 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12166 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12167 handle = mono_method_get_wrapper_data (method, n);
12168 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12169 if (handle_class == mono_defaults.typehandle_class)
12170 handle = &((MonoClass*)handle)->byval_arg;
12173 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12178 mono_class_init (handle_class);
12179 if (cfg->gshared) {
12180 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12181 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12182 /* This case handles ldtoken
12183 of an open type, like for
12186 } else if (handle_class == mono_defaults.typehandle_class) {
12187 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12188 } else if (handle_class == mono_defaults.fieldhandle_class)
12189 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12190 else if (handle_class == mono_defaults.methodhandle_class)
12191 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12193 g_assert_not_reached ();
12196 if ((cfg->opt & MONO_OPT_SHARED) &&
12197 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12198 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12199 MonoInst *addr, *vtvar, *iargs [3];
12200 int method_context_used;
12202 method_context_used = mini_method_check_context_used (cfg, method);
12204 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12206 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12207 EMIT_NEW_ICONST (cfg, iargs [1], n);
12208 if (method_context_used) {
12209 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12210 method, MONO_RGCTX_INFO_METHOD);
12211 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12213 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12214 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12216 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12218 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12220 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12222 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12223 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12224 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12225 (cmethod->klass == mono_defaults.systemtype_class) &&
12226 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12227 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12229 mono_class_init (tclass);
12230 if (context_used) {
12231 ins = emit_get_rgctx_klass (cfg, context_used,
12232 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12233 } else if (cfg->compile_aot) {
12234 if (method->wrapper_type) {
12235 mono_error_init (&error); //got to do it since there are multiple conditionals below
12236 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12237 /* Special case for static synchronized wrappers */
12238 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12240 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12241 /* FIXME: n is not a normal token */
12243 EMIT_NEW_PCONST (cfg, ins, NULL);
12246 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12250 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &error);
12251 mono_error_raise_exception (&error); /* FIXME don't raise here */
12253 EMIT_NEW_PCONST (cfg, ins, rt);
12255 ins->type = STACK_OBJ;
12256 ins->klass = cmethod->klass;
12259 MonoInst *addr, *vtvar;
12261 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12263 if (context_used) {
12264 if (handle_class == mono_defaults.typehandle_class) {
12265 ins = emit_get_rgctx_klass (cfg, context_used,
12266 mono_class_from_mono_type ((MonoType *)handle),
12267 MONO_RGCTX_INFO_TYPE);
12268 } else if (handle_class == mono_defaults.methodhandle_class) {
12269 ins = emit_get_rgctx_method (cfg, context_used,
12270 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12271 } else if (handle_class == mono_defaults.fieldhandle_class) {
12272 ins = emit_get_rgctx_field (cfg, context_used,
12273 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12275 g_assert_not_reached ();
12277 } else if (cfg->compile_aot) {
12278 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12280 EMIT_NEW_PCONST (cfg, ins, handle);
12282 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12283 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12284 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12294 MONO_INST_NEW (cfg, ins, OP_THROW);
12296 ins->sreg1 = sp [0]->dreg;
12298 cfg->cbb->out_of_line = TRUE;
12299 MONO_ADD_INS (cfg->cbb, ins);
12300 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12301 MONO_ADD_INS (cfg->cbb, ins);
12304 link_bblock (cfg, cfg->cbb, end_bblock);
12305 start_new_bblock = 1;
12306 /* This can complicate code generation for llvm since the return value might not be defined */
12307 if (COMPILE_LLVM (cfg))
12308 INLINE_FAILURE ("throw");
12310 case CEE_ENDFINALLY:
12311 /* mono_save_seq_point_info () depends on this */
12312 if (sp != stack_start)
12313 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12314 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12315 MONO_ADD_INS (cfg->cbb, ins);
12317 start_new_bblock = 1;
12320 * Control will leave the method so empty the stack, otherwise
12321 * the next basic block will start with a nonempty stack.
12323 while (sp != stack_start) {
12328 case CEE_LEAVE_S: {
12331 if (*ip == CEE_LEAVE) {
12333 target = ip + 5 + (gint32)read32(ip + 1);
12336 target = ip + 2 + (signed char)(ip [1]);
12339 /* empty the stack */
12340 while (sp != stack_start) {
12345 * If this leave statement is in a catch block, check for a
12346 * pending exception, and rethrow it if necessary.
12347 * We avoid doing this in runtime invoke wrappers, since those are called
12348 * by native code which expects the wrapper to catch all exceptions.
12350 for (i = 0; i < header->num_clauses; ++i) {
12351 MonoExceptionClause *clause = &header->clauses [i];
12354 * Use <= in the final comparison to handle clauses with multiple
12355 * leave statements, like in bug #78024.
12356 * The ordering of the exception clauses guarantees that we find the
12357 * innermost clause.
12359 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12361 MonoBasicBlock *dont_throw;
12366 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12369 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12371 NEW_BBLOCK (cfg, dont_throw);
12374 * Currently, we always rethrow the abort exception, despite the
12375 * fact that this is not correct. See thread6.cs for an example.
12376 * But propagating the abort exception is more important than
12377 * getting the semantics right.
12379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12380 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12381 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12383 MONO_START_BB (cfg, dont_throw);
12388 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12391 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12393 MonoExceptionClause *clause;
12395 for (tmp = handlers; tmp; tmp = tmp->next) {
12396 clause = (MonoExceptionClause *)tmp->data;
12397 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12399 link_bblock (cfg, cfg->cbb, tblock);
12400 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12401 ins->inst_target_bb = tblock;
12402 ins->inst_eh_block = clause;
12403 MONO_ADD_INS (cfg->cbb, ins);
12404 cfg->cbb->has_call_handler = 1;
12405 if (COMPILE_LLVM (cfg)) {
12406 MonoBasicBlock *target_bb;
12409 * Link the finally bblock with the target, since it will
12410 * conceptually branch there.
12412 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12413 GET_BBLOCK (cfg, target_bb, target);
12414 link_bblock (cfg, tblock, target_bb);
12417 g_list_free (handlers);
12420 MONO_INST_NEW (cfg, ins, OP_BR);
12421 MONO_ADD_INS (cfg->cbb, ins);
12422 GET_BBLOCK (cfg, tblock, target);
12423 link_bblock (cfg, cfg->cbb, tblock);
12424 ins->inst_target_bb = tblock;
12426 start_new_bblock = 1;
12428 if (*ip == CEE_LEAVE)
12437 * Mono specific opcodes
12439 case MONO_CUSTOM_PREFIX: {
12441 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12445 case CEE_MONO_ICALL: {
12447 MonoJitICallInfo *info;
12449 token = read32 (ip + 2);
12450 func = mono_method_get_wrapper_data (method, token);
12451 info = mono_find_jit_icall_by_addr (func);
12453 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12456 CHECK_STACK (info->sig->param_count);
12457 sp -= info->sig->param_count;
12459 ins = mono_emit_jit_icall (cfg, info->func, sp);
12460 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12464 inline_costs += 10 * num_calls++;
12468 case CEE_MONO_LDPTR_CARD_TABLE:
12469 case CEE_MONO_LDPTR_NURSERY_START:
12470 case CEE_MONO_LDPTR_NURSERY_BITS:
12471 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12472 CHECK_STACK_OVF (1);
12475 case CEE_MONO_LDPTR_CARD_TABLE:
12476 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12478 case CEE_MONO_LDPTR_NURSERY_START:
12479 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12481 case CEE_MONO_LDPTR_NURSERY_BITS:
12482 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12484 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12485 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12491 inline_costs += 10 * num_calls++;
12494 case CEE_MONO_LDPTR: {
12497 CHECK_STACK_OVF (1);
12499 token = read32 (ip + 2);
12501 ptr = mono_method_get_wrapper_data (method, token);
12502 EMIT_NEW_PCONST (cfg, ins, ptr);
12505 inline_costs += 10 * num_calls++;
12506 /* Can't embed random pointers into AOT code */
12510 case CEE_MONO_JIT_ICALL_ADDR: {
12511 MonoJitICallInfo *callinfo;
12514 CHECK_STACK_OVF (1);
12516 token = read32 (ip + 2);
12518 ptr = mono_method_get_wrapper_data (method, token);
12519 callinfo = mono_find_jit_icall_by_addr (ptr);
12520 g_assert (callinfo);
12521 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12524 inline_costs += 10 * num_calls++;
12527 case CEE_MONO_ICALL_ADDR: {
12528 MonoMethod *cmethod;
12531 CHECK_STACK_OVF (1);
12533 token = read32 (ip + 2);
12535 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12537 if (cfg->compile_aot) {
12538 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12540 ptr = mono_lookup_internal_call (cmethod);
12542 EMIT_NEW_PCONST (cfg, ins, ptr);
12548 case CEE_MONO_VTADDR: {
12549 MonoInst *src_var, *src;
12555 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12556 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12561 case CEE_MONO_NEWOBJ: {
12562 MonoInst *iargs [2];
12564 CHECK_STACK_OVF (1);
12566 token = read32 (ip + 2);
12567 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12568 mono_class_init (klass);
12569 NEW_DOMAINCONST (cfg, iargs [0]);
12570 MONO_ADD_INS (cfg->cbb, iargs [0]);
12571 NEW_CLASSCONST (cfg, iargs [1], klass);
12572 MONO_ADD_INS (cfg->cbb, iargs [1]);
12573 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12575 inline_costs += 10 * num_calls++;
12578 case CEE_MONO_OBJADDR:
12581 MONO_INST_NEW (cfg, ins, OP_MOVE);
12582 ins->dreg = alloc_ireg_mp (cfg);
12583 ins->sreg1 = sp [0]->dreg;
12584 ins->type = STACK_MP;
12585 MONO_ADD_INS (cfg->cbb, ins);
12589 case CEE_MONO_LDNATIVEOBJ:
12591 * Similar to LDOBJ, but instead load the unmanaged
12592 * representation of the vtype to the stack.
12597 token = read32 (ip + 2);
12598 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12599 g_assert (klass->valuetype);
12600 mono_class_init (klass);
12603 MonoInst *src, *dest, *temp;
12606 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12607 temp->backend.is_pinvoke = 1;
12608 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12609 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12611 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12612 dest->type = STACK_VTYPE;
12613 dest->klass = klass;
12619 case CEE_MONO_RETOBJ: {
12621 * Same as RET, but return the native representation of a vtype
12624 g_assert (cfg->ret);
12625 g_assert (mono_method_signature (method)->pinvoke);
12630 token = read32 (ip + 2);
12631 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12633 if (!cfg->vret_addr) {
12634 g_assert (cfg->ret_var_is_local);
12636 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12638 EMIT_NEW_RETLOADA (cfg, ins);
12640 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12642 if (sp != stack_start)
12645 MONO_INST_NEW (cfg, ins, OP_BR);
12646 ins->inst_target_bb = end_bblock;
12647 MONO_ADD_INS (cfg->cbb, ins);
12648 link_bblock (cfg, cfg->cbb, end_bblock);
12649 start_new_bblock = 1;
12653 case CEE_MONO_CISINST:
12654 case CEE_MONO_CCASTCLASS: {
12659 token = read32 (ip + 2);
12660 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12661 if (ip [1] == CEE_MONO_CISINST)
12662 ins = handle_cisinst (cfg, klass, sp [0]);
12664 ins = handle_ccastclass (cfg, klass, sp [0]);
12669 case CEE_MONO_SAVE_LMF:
12670 case CEE_MONO_RESTORE_LMF:
12673 case CEE_MONO_CLASSCONST:
12674 CHECK_STACK_OVF (1);
12676 token = read32 (ip + 2);
12677 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12680 inline_costs += 10 * num_calls++;
12682 case CEE_MONO_NOT_TAKEN:
12683 cfg->cbb->out_of_line = TRUE;
12686 case CEE_MONO_TLS: {
12689 CHECK_STACK_OVF (1);
12691 key = (MonoTlsKey)read32 (ip + 2);
12692 g_assert (key < TLS_KEY_NUM);
12694 ins = mono_create_tls_get (cfg, key);
12696 if (cfg->compile_aot) {
12698 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12699 ins->dreg = alloc_preg (cfg);
12700 ins->type = STACK_PTR;
12702 g_assert_not_reached ();
12705 ins->type = STACK_PTR;
12706 MONO_ADD_INS (cfg->cbb, ins);
12711 case CEE_MONO_DYN_CALL: {
12712 MonoCallInst *call;
12714 /* It would be easier to call a trampoline, but that would put an
12715 * extra frame on the stack, confusing exception handling. So
12716 * implement it inline using an opcode for now.
12719 if (!cfg->dyn_call_var) {
12720 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12721 /* prevent it from being register allocated */
12722 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12725 /* Has to use a call inst since the local regalloc expects it */
12726 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12727 ins = (MonoInst*)call;
12729 ins->sreg1 = sp [0]->dreg;
12730 ins->sreg2 = sp [1]->dreg;
12731 MONO_ADD_INS (cfg->cbb, ins);
12733 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12736 inline_costs += 10 * num_calls++;
12740 case CEE_MONO_MEMORY_BARRIER: {
12742 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12746 case CEE_MONO_JIT_ATTACH: {
12747 MonoInst *args [16], *domain_ins;
12748 MonoInst *ad_ins, *jit_tls_ins;
12749 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12751 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12753 EMIT_NEW_PCONST (cfg, ins, NULL);
12754 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12756 ad_ins = mono_get_domain_intrinsic (cfg);
12757 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12759 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12760 NEW_BBLOCK (cfg, next_bb);
12761 NEW_BBLOCK (cfg, call_bb);
12763 if (cfg->compile_aot) {
12764 /* AOT code is only used in the root domain */
12765 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12767 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12769 MONO_ADD_INS (cfg->cbb, ad_ins);
12770 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12773 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12774 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12778 MONO_START_BB (cfg, call_bb);
12781 if (cfg->compile_aot) {
12782 /* AOT code is only used in the root domain */
12783 EMIT_NEW_PCONST (cfg, args [0], NULL);
12785 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12787 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12788 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12791 MONO_START_BB (cfg, next_bb);
12795 case CEE_MONO_JIT_DETACH: {
12796 MonoInst *args [16];
12798 /* Restore the original domain */
12799 dreg = alloc_ireg (cfg);
12800 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12801 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12805 case CEE_MONO_CALLI_EXTRA_ARG: {
12807 MonoMethodSignature *fsig;
12811 * This is the same as CEE_CALLI, but passes an additional argument
12812 * to the called method in llvmonly mode.
12813 * This is only used by delegate invoke wrappers to call the
12814 * actual delegate method.
12816 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12819 token = read32 (ip + 2);
12827 fsig = mini_get_signature (method, token, generic_context);
12829 if (cfg->llvm_only)
12830 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12832 n = fsig->param_count + fsig->hasthis + 1;
12839 if (cfg->llvm_only) {
12841 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12842 * cconv. This is set by mono_init_delegate ().
12844 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12845 MonoInst *callee = addr;
12846 MonoInst *call, *localloc_ins;
12847 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12848 int low_bit_reg = alloc_preg (cfg);
12850 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12851 NEW_BBLOCK (cfg, end_bb);
12853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12854 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12855 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12857 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12858 addr = emit_get_rgctx_sig (cfg, context_used,
12859 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12861 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12863 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12864 ins->dreg = alloc_preg (cfg);
12865 ins->inst_imm = 2 * SIZEOF_VOID_P;
12866 MONO_ADD_INS (cfg->cbb, ins);
12867 localloc_ins = ins;
12868 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12872 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12875 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12876 MONO_START_BB (cfg, is_gsharedvt_bb);
12877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12878 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12879 ins->dreg = call->dreg;
12881 MONO_START_BB (cfg, end_bb);
12883 /* Caller uses a normal calling conv */
12885 MonoInst *callee = addr;
12886 MonoInst *call, *localloc_ins;
12887 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12888 int low_bit_reg = alloc_preg (cfg);
12890 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12891 NEW_BBLOCK (cfg, end_bb);
12893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12894 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12895 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12897 /* Normal case: callee uses a normal cconv, no conversion is needed */
12898 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12899 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12900 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12901 MONO_START_BB (cfg, is_gsharedvt_bb);
12902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12903 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12904 MONO_ADD_INS (cfg->cbb, addr);
12906 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12908 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12909 ins->dreg = alloc_preg (cfg);
12910 ins->inst_imm = 2 * SIZEOF_VOID_P;
12911 MONO_ADD_INS (cfg->cbb, ins);
12912 localloc_ins = ins;
12913 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12915 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12917 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12918 ins->dreg = call->dreg;
12919 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12921 MONO_START_BB (cfg, end_bb);
12924 /* Same as CEE_CALLI */
12925 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12927 * We pass the address to the gsharedvt trampoline in the rgctx reg
12929 MonoInst *callee = addr;
12931 addr = emit_get_rgctx_sig (cfg, context_used,
12932 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12933 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12935 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12939 if (!MONO_TYPE_IS_VOID (fsig->ret))
12940 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12942 CHECK_CFG_EXCEPTION;
12946 constrained_class = NULL;
12950 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12956 case CEE_PREFIX1: {
12959 case CEE_ARGLIST: {
12960 /* somewhat similar to LDTOKEN */
12961 MonoInst *addr, *vtvar;
12962 CHECK_STACK_OVF (1);
12963 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12965 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12966 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12968 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12969 ins->type = STACK_VTYPE;
12970 ins->klass = mono_defaults.argumenthandle_class;
12980 MonoInst *cmp, *arg1, *arg2;
12988 * The following transforms:
12989 * CEE_CEQ into OP_CEQ
12990 * CEE_CGT into OP_CGT
12991 * CEE_CGT_UN into OP_CGT_UN
12992 * CEE_CLT into OP_CLT
12993 * CEE_CLT_UN into OP_CLT_UN
12995 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12997 MONO_INST_NEW (cfg, ins, cmp->opcode);
12998 cmp->sreg1 = arg1->dreg;
12999 cmp->sreg2 = arg2->dreg;
13000 type_from_op (cfg, cmp, arg1, arg2);
13002 add_widen_op (cfg, cmp, &arg1, &arg2);
13003 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13004 cmp->opcode = OP_LCOMPARE;
13005 else if (arg1->type == STACK_R4)
13006 cmp->opcode = OP_RCOMPARE;
13007 else if (arg1->type == STACK_R8)
13008 cmp->opcode = OP_FCOMPARE;
13010 cmp->opcode = OP_ICOMPARE;
13011 MONO_ADD_INS (cfg->cbb, cmp);
13012 ins->type = STACK_I4;
13013 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13014 type_from_op (cfg, ins, arg1, arg2);
13016 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13018 * The backends expect the fceq opcodes to do the
13021 ins->sreg1 = cmp->sreg1;
13022 ins->sreg2 = cmp->sreg2;
13025 MONO_ADD_INS (cfg->cbb, ins);
13031 MonoInst *argconst;
13032 MonoMethod *cil_method;
13034 CHECK_STACK_OVF (1);
13036 n = read32 (ip + 2);
13037 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13040 mono_class_init (cmethod->klass);
13042 mono_save_token_info (cfg, image, n, cmethod);
13044 context_used = mini_method_check_context_used (cfg, cmethod);
13046 cil_method = cmethod;
13047 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13048 METHOD_ACCESS_FAILURE (method, cil_method);
13050 if (mono_security_core_clr_enabled ())
13051 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13054 * Optimize the common case of ldftn+delegate creation
13056 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13057 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13058 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13059 MonoInst *target_ins, *handle_ins;
13060 MonoMethod *invoke;
13061 int invoke_context_used;
13063 invoke = mono_get_delegate_invoke (ctor_method->klass);
13064 if (!invoke || !mono_method_signature (invoke))
13067 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13069 target_ins = sp [-1];
13071 if (mono_security_core_clr_enabled ())
13072 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13074 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13075 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13076 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13078 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13082 /* FIXME: SGEN support */
13083 if (invoke_context_used == 0 || cfg->llvm_only) {
13085 if (cfg->verbose_level > 3)
13086 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13087 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13090 CHECK_CFG_EXCEPTION;
13100 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13101 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13105 inline_costs += 10 * num_calls++;
13108 case CEE_LDVIRTFTN: {
13109 MonoInst *args [2];
13113 n = read32 (ip + 2);
13114 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13117 mono_class_init (cmethod->klass);
13119 context_used = mini_method_check_context_used (cfg, cmethod);
13121 if (mono_security_core_clr_enabled ())
13122 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13125 * Optimize the common case of ldvirtftn+delegate creation
13127 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13128 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13129 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13130 MonoInst *target_ins, *handle_ins;
13131 MonoMethod *invoke;
13132 int invoke_context_used;
13133 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13135 invoke = mono_get_delegate_invoke (ctor_method->klass);
13136 if (!invoke || !mono_method_signature (invoke))
13139 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13141 target_ins = sp [-1];
13143 if (mono_security_core_clr_enabled ())
13144 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13146 /* FIXME: SGEN support */
13147 if (invoke_context_used == 0 || cfg->llvm_only) {
13149 if (cfg->verbose_level > 3)
13150 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13151 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13154 CHECK_CFG_EXCEPTION;
13167 args [1] = emit_get_rgctx_method (cfg, context_used,
13168 cmethod, MONO_RGCTX_INFO_METHOD);
13171 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13173 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13176 inline_costs += 10 * num_calls++;
13180 CHECK_STACK_OVF (1);
13182 n = read16 (ip + 2);
13184 EMIT_NEW_ARGLOAD (cfg, ins, n);
13189 CHECK_STACK_OVF (1);
13191 n = read16 (ip + 2);
13193 NEW_ARGLOADA (cfg, ins, n);
13194 MONO_ADD_INS (cfg->cbb, ins);
13202 n = read16 (ip + 2);
13204 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13206 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13210 CHECK_STACK_OVF (1);
13212 n = read16 (ip + 2);
13214 EMIT_NEW_LOCLOAD (cfg, ins, n);
13219 unsigned char *tmp_ip;
13220 CHECK_STACK_OVF (1);
13222 n = read16 (ip + 2);
13225 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13231 EMIT_NEW_LOCLOADA (cfg, ins, n);
13240 n = read16 (ip + 2);
13242 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13244 emit_stloc_ir (cfg, sp, header, n);
13251 if (sp != stack_start)
13253 if (cfg->method != method)
13255 * Inlining this into a loop in a parent could lead to
13256 * stack overflows which is different behavior than the
13257 * non-inlined case, thus disable inlining in this case.
13259 INLINE_FAILURE("localloc");
13261 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13262 ins->dreg = alloc_preg (cfg);
13263 ins->sreg1 = sp [0]->dreg;
13264 ins->type = STACK_PTR;
13265 MONO_ADD_INS (cfg->cbb, ins);
13267 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13269 ins->flags |= MONO_INST_INIT;
13274 case CEE_ENDFILTER: {
13275 MonoExceptionClause *clause, *nearest;
13280 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13282 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13283 ins->sreg1 = (*sp)->dreg;
13284 MONO_ADD_INS (cfg->cbb, ins);
13285 start_new_bblock = 1;
13289 for (cc = 0; cc < header->num_clauses; ++cc) {
13290 clause = &header->clauses [cc];
13291 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13292 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13293 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13296 g_assert (nearest);
13297 if ((ip - header->code) != nearest->handler_offset)
13302 case CEE_UNALIGNED_:
13303 ins_flag |= MONO_INST_UNALIGNED;
13304 /* FIXME: record alignment? we can assume 1 for now */
13308 case CEE_VOLATILE_:
13309 ins_flag |= MONO_INST_VOLATILE;
13313 ins_flag |= MONO_INST_TAILCALL;
13314 cfg->flags |= MONO_CFG_HAS_TAIL;
13315 /* Can't inline tail calls at this time */
13316 inline_costs += 100000;
13323 token = read32 (ip + 2);
13324 klass = mini_get_class (method, token, generic_context);
13325 CHECK_TYPELOAD (klass);
13326 if (generic_class_is_reference_type (cfg, klass))
13327 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13329 mini_emit_initobj (cfg, *sp, NULL, klass);
13333 case CEE_CONSTRAINED_:
13335 token = read32 (ip + 2);
13336 constrained_class = mini_get_class (method, token, generic_context);
13337 CHECK_TYPELOAD (constrained_class);
13341 case CEE_INITBLK: {
13342 MonoInst *iargs [3];
13346 /* Skip optimized paths for volatile operations. */
13347 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13348 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13349 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13350 /* emit_memset only works when val == 0 */
13351 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13354 iargs [0] = sp [0];
13355 iargs [1] = sp [1];
13356 iargs [2] = sp [2];
13357 if (ip [1] == CEE_CPBLK) {
13359 * FIXME: It's unclear whether we should be emitting both the acquire
13360 * and release barriers for cpblk. It is technically both a load and
13361 * store operation, so it seems like that's the sensible thing to do.
13363 * FIXME: We emit full barriers on both sides of the operation for
13364 * simplicity. We should have a separate atomic memcpy method instead.
13366 MonoMethod *memcpy_method = get_memcpy_method ();
13368 if (ins_flag & MONO_INST_VOLATILE)
13369 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13371 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13372 call->flags |= ins_flag;
13374 if (ins_flag & MONO_INST_VOLATILE)
13375 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13377 MonoMethod *memset_method = get_memset_method ();
13378 if (ins_flag & MONO_INST_VOLATILE) {
13379 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13380 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13382 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13383 call->flags |= ins_flag;
13394 ins_flag |= MONO_INST_NOTYPECHECK;
13396 ins_flag |= MONO_INST_NORANGECHECK;
13397 /* we ignore the no-nullcheck for now since we
13398 * really do it explicitly only when doing callvirt->call
13402 case CEE_RETHROW: {
13404 int handler_offset = -1;
13406 for (i = 0; i < header->num_clauses; ++i) {
13407 MonoExceptionClause *clause = &header->clauses [i];
13408 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13409 handler_offset = clause->handler_offset;
13414 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13416 if (handler_offset == -1)
13419 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13420 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13421 ins->sreg1 = load->dreg;
13422 MONO_ADD_INS (cfg->cbb, ins);
13424 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13425 MONO_ADD_INS (cfg->cbb, ins);
13428 link_bblock (cfg, cfg->cbb, end_bblock);
13429 start_new_bblock = 1;
13437 CHECK_STACK_OVF (1);
13439 token = read32 (ip + 2);
13440 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13441 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13444 val = mono_type_size (type, &ialign);
13446 MonoClass *klass = mini_get_class (method, token, generic_context);
13447 CHECK_TYPELOAD (klass);
13449 val = mono_type_size (&klass->byval_arg, &ialign);
13451 if (mini_is_gsharedvt_klass (klass))
13452 GSHAREDVT_FAILURE (*ip);
13454 EMIT_NEW_ICONST (cfg, ins, val);
13459 case CEE_REFANYTYPE: {
13460 MonoInst *src_var, *src;
13462 GSHAREDVT_FAILURE (*ip);
13468 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13470 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13471 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13472 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13477 case CEE_READONLY_:
13490 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13500 g_warning ("opcode 0x%02x not handled", *ip);
13504 if (start_new_bblock != 1)
13507 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13508 if (cfg->cbb->next_bb) {
13509 /* This could already be set because of inlining, #693905 */
13510 MonoBasicBlock *bb = cfg->cbb;
13512 while (bb->next_bb)
13514 bb->next_bb = end_bblock;
13516 cfg->cbb->next_bb = end_bblock;
13519 if (cfg->method == method && cfg->domainvar) {
13521 MonoInst *get_domain;
13523 cfg->cbb = init_localsbb;
13525 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13526 MONO_ADD_INS (cfg->cbb, get_domain);
13528 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13530 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13531 MONO_ADD_INS (cfg->cbb, store);
13534 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13535 if (cfg->compile_aot)
13536 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13537 mono_get_got_var (cfg);
13540 if (cfg->method == method && cfg->got_var)
13541 mono_emit_load_got_addr (cfg);
13543 if (init_localsbb) {
13544 cfg->cbb = init_localsbb;
13546 for (i = 0; i < header->num_locals; ++i) {
13547 emit_init_local (cfg, i, header->locals [i], init_locals);
13551 if (cfg->init_ref_vars && cfg->method == method) {
13552 /* Emit initialization for ref vars */
13553 // FIXME: Avoid duplication initialization for IL locals.
13554 for (i = 0; i < cfg->num_varinfo; ++i) {
13555 MonoInst *ins = cfg->varinfo [i];
13557 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13558 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13562 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13563 cfg->cbb = init_localsbb;
13564 emit_push_lmf (cfg);
13567 cfg->cbb = init_localsbb;
13568 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13571 MonoBasicBlock *bb;
13574 * Make seq points at backward branch targets interruptable.
13576 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13577 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13578 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13581 /* Add a sequence point for method entry/exit events */
13582 if (seq_points && cfg->gen_sdb_seq_points) {
13583 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13584 MONO_ADD_INS (init_localsbb, ins);
13585 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13586 MONO_ADD_INS (cfg->bb_exit, ins);
13590 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13591 * the code they refer to was dead (#11880).
13593 if (sym_seq_points) {
13594 for (i = 0; i < header->code_size; ++i) {
13595 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13598 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13599 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13606 if (cfg->method == method) {
13607 MonoBasicBlock *bb;
13608 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13609 bb->region = mono_find_block_region (cfg, bb->real_offset);
13611 mono_create_spvar_for_region (cfg, bb->region);
13612 if (cfg->verbose_level > 2)
13613 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13617 if (inline_costs < 0) {
13620 /* Method is too large */
13621 mname = mono_method_full_name (method, TRUE);
13622 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13626 if ((cfg->verbose_level > 2) && (cfg->method == method))
13627 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13632 g_assert (!mono_error_ok (&cfg->error));
13636 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13640 set_exception_type_from_invalid_il (cfg, method, ip);
13644 g_slist_free (class_inits);
13645 mono_basic_block_free (original_bb);
13646 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13647 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13648 if (cfg->exception_type)
13651 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its immediate-operand
 * STORE*_MEMBASE_IMM counterpart.  Used when the stored value is a known
 * constant, so the source register can be folded into the instruction.
 */
13655 store_membase_reg_to_store_membase_imm (int opcode)
13658 case OP_STORE_MEMBASE_REG:
13659 return OP_STORE_MEMBASE_IMM;
13660 case OP_STOREI1_MEMBASE_REG:
13661 return OP_STOREI1_MEMBASE_IMM;
13662 case OP_STOREI2_MEMBASE_REG:
13663 return OP_STOREI2_MEMBASE_IMM;
13664 case OP_STOREI4_MEMBASE_REG:
13665 return OP_STOREI4_MEMBASE_IMM;
13666 case OP_STOREI8_MEMBASE_REG:
13667 return OP_STOREI8_MEMBASE_IMM;
/* Callers must only pass the register-store opcodes handled above */
13669 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-source IR opcode to its immediate-operand (_IMM) variant,
 * used by local constant folding/propagation when one operand is a
 * constant.  Returns -1 for opcodes without an immediate form (the
 * fall-through default; not visible in this excerpt — TODO confirm).
 */
13676 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
13680 return OP_IADD_IMM;
13682 return OP_ISUB_IMM;
13684 return OP_IDIV_IMM;
13686 return OP_IDIV_UN_IMM;
13688 return OP_IREM_IMM;
13690 return OP_IREM_UN_IMM;
13692 return OP_IMUL_IMM;
13694 return OP_IAND_IMM;
13698 return OP_IXOR_IMM;
13700 return OP_ISHL_IMM;
13702 return OP_ISHR_IMM;
13704 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU ops */
13707 return OP_LADD_IMM;
13709 return OP_LSUB_IMM;
13711 return OP_LAND_IMM;
13715 return OP_LXOR_IMM;
13717 return OP_LSHL_IMM;
13719 return OP_LSHR_IMM;
13721 return OP_LSHR_UN_IMM;
/* LREM_IMM only exists on 64 bit hosts; on 32 bit it is emulated */
13722 #if SIZEOF_REGISTER == 8
13724 return OP_LREM_IMM;
/* compares */
13728 return OP_COMPARE_IMM;
13730 return OP_ICOMPARE_IMM;
13732 return OP_LCOMPARE_IMM;
/* stores: fold the constant source into the store itself */
13734 case OP_STORE_MEMBASE_REG:
13735 return OP_STORE_MEMBASE_IMM;
13736 case OP_STOREI1_MEMBASE_REG:
13737 return OP_STOREI1_MEMBASE_IMM;
13738 case OP_STOREI2_MEMBASE_REG:
13739 return OP_STOREI2_MEMBASE_IMM;
13740 case OP_STOREI4_MEMBASE_REG:
13741 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific opcodes */
13743 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13745 return OP_X86_PUSH_IMM;
13746 case OP_X86_COMPARE_MEMBASE_REG:
13747 return OP_X86_COMPARE_MEMBASE_IMM;
13749 #if defined(TARGET_AMD64)
13750 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13751 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a constant address becomes a direct call */
13753 case OP_VOIDCALL_REG:
13754 return OP_VOIDCALL;
13762 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * typed OP_LOAD*_MEMBASE IR opcode.
 */
13769 ldind_to_load_membase (int opcode)
13773 return OP_LOADI1_MEMBASE;
13775 return OP_LOADU1_MEMBASE;
13777 return OP_LOADI2_MEMBASE;
13779 return OP_LOADU2_MEMBASE;
13781 return OP_LOADI4_MEMBASE;
13783 return OP_LOADU4_MEMBASE;
/* native int and object references both use the pointer-sized load */
13785 return OP_LOAD_MEMBASE;
13786 case CEE_LDIND_REF:
13787 return OP_LOAD_MEMBASE;
13789 return OP_LOADI8_MEMBASE;
13791 return OP_LOADR4_MEMBASE;
13793 return OP_LOADR8_MEMBASE;
/* Callers must only pass CEE_LDIND_* opcodes */
13795 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * typed OP_STORE*_MEMBASE_REG IR opcode.
 */
13802 stind_to_store_membase (int opcode)
13806 return OP_STOREI1_MEMBASE_REG;
13808 return OP_STOREI2_MEMBASE_REG;
13810 return OP_STOREI4_MEMBASE_REG;
/* native int and object references use the pointer-sized store */
13812 case CEE_STIND_REF:
13813 return OP_STORE_MEMBASE_REG;
13815 return OP_STOREI8_MEMBASE_REG;
13817 return OP_STORER4_MEMBASE_REG;
13819 return OP_STORER8_MEMBASE_REG;
/* Callers must only pass CEE_STIND_* opcodes */
13821 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * variant, for backends that can load directly from a constant address.
 * Only x86/amd64 provide these opcodes at present.
 */
13828 mono_load_membase_to_load_mem (int opcode)
13830 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13831 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13833 case OP_LOAD_MEMBASE:
13834 return OP_LOAD_MEM;
13835 case OP_LOADU1_MEMBASE:
13836 return OP_LOADU1_MEM;
13837 case OP_LOADU2_MEMBASE:
13838 return OP_LOADU2_MEM;
13839 case OP_LOADI4_MEMBASE:
13840 return OP_LOADI4_MEM;
13841 case OP_LOADU4_MEMBASE:
13842 return OP_LOADU4_MEM;
/* 64 bit loads only have a _MEM form on 64 bit hosts */
13843 #if SIZEOF_REGISTER == 8
13844 case OP_LOADI8_MEMBASE:
13845 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination would otherwise be stored with
 * STORE_OPCODE, return an x86/amd64 read-modify-write _MEMBASE opcode that
 * operates directly on the memory slot, fusing the load/op/store sequence.
 * Returns -1 when no fused form exists (default path not visible in this
 * excerpt — TODO confirm).
 */
13854 op_to_op_dest_membase (int store_opcode, int opcode)
13856 #if defined(TARGET_X86)
/* only pointer-sized (32 bit) destinations can be fused on x86 */
13857 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13862 return OP_X86_ADD_MEMBASE_REG;
13864 return OP_X86_SUB_MEMBASE_REG;
13866 return OP_X86_AND_MEMBASE_REG;
13868 return OP_X86_OR_MEMBASE_REG;
13870 return OP_X86_XOR_MEMBASE_REG;
13873 return OP_X86_ADD_MEMBASE_IMM;
13876 return OP_X86_SUB_MEMBASE_IMM;
13879 return OP_X86_AND_MEMBASE_IMM;
13882 return OP_X86_OR_MEMBASE_IMM;
13885 return OP_X86_XOR_MEMBASE_IMM;
13891 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64 bit destinations (OP_STOREI8_MEMBASE_REG) */
13892 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_ opcodes ... */
13897 return OP_X86_ADD_MEMBASE_REG;
13899 return OP_X86_SUB_MEMBASE_REG;
13901 return OP_X86_AND_MEMBASE_REG;
13903 return OP_X86_OR_MEMBASE_REG;
13905 return OP_X86_XOR_MEMBASE_REG;
13907 return OP_X86_ADD_MEMBASE_IMM;
13909 return OP_X86_SUB_MEMBASE_IMM;
13911 return OP_X86_AND_MEMBASE_IMM;
13913 return OP_X86_OR_MEMBASE_IMM;
13915 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64 bit ops use the AMD64_ opcodes */
13917 return OP_AMD64_ADD_MEMBASE_REG;
13919 return OP_AMD64_SUB_MEMBASE_REG;
13921 return OP_AMD64_AND_MEMBASE_REG;
13923 return OP_AMD64_OR_MEMBASE_REG;
13925 return OP_AMD64_XOR_MEMBASE_REG;
13928 return OP_AMD64_ADD_MEMBASE_IMM;
13931 return OP_AMD64_SUB_MEMBASE_IMM;
13934 return OP_AMD64_AND_MEMBASE_IMM;
13937 return OP_AMD64_OR_MEMBASE_IMM;
13940 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that follows it,
 * producing an x86/amd64 SETcc-to-memory opcode.  Only byte-sized stores
 * (OP_STOREI1_MEMBASE_REG) can be fused, since SETcc writes 8 bits.
 * Returns -1 when no fused form exists (default path not visible in this
 * excerpt — TODO confirm).
 */
13950 op_to_op_store_membase (int store_opcode, int opcode)
13952 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13955 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13956 return OP_X86_SETEQ_MEMBASE;
13958 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13959 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the FIRST source of OPCODE into a
 * single x86/amd64 opcode that reads the operand directly from memory.
 * Returns -1 when the pair cannot be fused (default path not visible in
 * this excerpt — TODO confirm).
 */
13967 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13970 /* FIXME: This has sign extension issues */
13972 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13973 return OP_X86_COMPARE_MEMBASE8_IMM;
/* on x86 only pointer/32 bit loads can be fused */
13976 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13981 return OP_X86_PUSH_MEMBASE;
13982 case OP_COMPARE_IMM:
13983 case OP_ICOMPARE_IMM:
13984 return OP_X86_COMPARE_MEMBASE_IMM;
13987 return OP_X86_COMPARE_MEMBASE_REG;
13991 #ifdef TARGET_AMD64
13992 /* FIXME: This has sign extension issues */
13994 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13995 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 32 bit wide, not 64 */
14000 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14001 return OP_X86_PUSH_MEMBASE;
14003 /* FIXME: This only works for 32 bit immediates
14004 case OP_COMPARE_IMM:
14005 case OP_LCOMPARE_IMM:
14006 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14007 return OP_AMD64_COMPARE_MEMBASE_IMM;
14009 case OP_ICOMPARE_IMM:
14010 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14011 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
14015 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14016 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14017 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14018 return OP_AMD64_COMPARE_MEMBASE_REG;
14021 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14022 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the SECOND source of OPCODE into a
 * single x86/amd64 reg-op-memory opcode.  Returns -1 when the pair cannot
 * be fused (default path not visible in this excerpt — TODO confirm).
 */
14031 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* on x86 only pointer/32 bit loads can be fused */
14034 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14040 return OP_X86_COMPARE_REG_MEMBASE;
14042 return OP_X86_ADD_REG_MEMBASE;
14044 return OP_X86_SUB_REG_MEMBASE;
14046 return OP_X86_AND_REG_MEMBASE;
14048 return OP_X86_OR_REG_MEMBASE;
14050 return OP_X86_XOR_REG_MEMBASE;
14054 #ifdef TARGET_AMD64
/* 32 bit operand (including pointer loads under ILP32/x32) */
14055 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14058 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14060 return OP_X86_ADD_REG_MEMBASE;
14062 return OP_X86_SUB_REG_MEMBASE;
14064 return OP_X86_AND_REG_MEMBASE;
14066 return OP_X86_OR_REG_MEMBASE;
14068 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand */
14070 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14074 return OP_AMD64_COMPARE_REG_MEMBASE;
14076 return OP_AMD64_ADD_REG_MEMBASE;
14078 return OP_AMD64_SUB_REG_MEMBASE;
14080 return OP_AMD64_AND_REG_MEMBASE;
14082 return OP_AMD64_OR_REG_MEMBASE;
14084 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion (presumably by
 * returning -1; the filtered cases are elided here — TODO confirm) for
 * opcodes that the current architecture emulates in software, since the
 * emulation helpers have no immediate form.
 */
14093 mono_op_to_op_imm_noemul (int opcode)
/* long shifts are emulated on 32 bit hosts unless the arch opts out */
14096 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* division/remainder may be emulated */
14102 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* multiplication may be emulated */
14109 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14114 return mono_op_to_op_imm (opcode);
14119 * mono_handle_global_vregs:
14121 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14125 mono_handle_global_vregs (MonoCompile *cfg)
14127 gint32 *vreg_to_bb;
14128 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] records the (block_num + 1) of the single bblock using
 * the vreg, or -1 once it is seen in a second bblock.
 * FIX: the element type is gint32, not gint32*, and the "+ 1" must apply to
 * the element count, not the byte count (the old expression under-sized the
 * last element by 3 bytes on 32 bit hosts).
 */
14131 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
14133 #ifdef MONO_ARCH_SIMD_INTRINSICS
14134 if (cfg->uses_simd_intrinsics)
14135 mono_simd_simplify_indirection (cfg);
14138 /* Find local vregs used in more than one bb */
14139 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14140 MonoInst *ins = bb->code;
14141 int block_num = bb->block_num;
14143 if (cfg->verbose_level > 2)
14144 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14147 for (; ins; ins = ins->next) {
14148 const char *spec = INS_INFO (ins->opcode);
14149 int regtype = 0, regindex;
14152 if (G_UNLIKELY (cfg->verbose_level > 2))
14153 mono_print_ins (ins);
14155 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the dreg and the (up to three) sregs of each instruction */
14157 for (regindex = 0; regindex < 4; regindex ++) {
14160 if (regindex == 0) {
14161 regtype = spec [MONO_INST_DEST];
14162 if (regtype == ' ')
14165 } else if (regindex == 1) {
14166 regtype = spec [MONO_INST_SRC1];
14167 if (regtype == ' ')
14170 } else if (regindex == 2) {
14171 regtype = spec [MONO_INST_SRC2];
14172 if (regtype == ' ')
14175 } else if (regindex == 3) {
14176 regtype = spec [MONO_INST_SRC3];
14177 if (regtype == ' ')
14182 #if SIZEOF_REGISTER == 4
14183 /* In the LLVM case, the long opcodes are not decomposed */
14184 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14186 * Since some instructions reference the original long vreg,
14187 * and some reference the two component vregs, it is quite hard
14188 * to determine when it needs to be global. So be conservative.
14190 if (!get_vreg_to_inst (cfg, vreg)) {
14191 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14193 if (cfg->verbose_level > 2)
14194 printf ("LONG VREG R%d made global.\n", vreg);
14198 * Make the component vregs volatile since the optimizations can
14199 * get confused otherwise.
14201 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14202 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14206 g_assert (vreg != -1);
14208 prev_bb = vreg_to_bb [vreg];
14209 if (prev_bb == 0) {
14210 /* 0 is a valid block num */
14211 vreg_to_bb [vreg] = block_num + 1;
14212 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are handled by the register allocator, skip them */
14213 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14216 if (!get_vreg_to_inst (cfg, vreg)) {
14217 if (G_UNLIKELY (cfg->verbose_level > 2))
14218 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create the global MonoInst with a type matching the regtype */
14222 if (vreg_is_ref (cfg, vreg))
14223 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14225 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14228 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14231 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14234 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14237 g_assert_not_reached ();
14241 /* Flag as having been used in more than one bb */
14242 vreg_to_bb [vreg] = -1;
14248 /* If a variable is used in only one bblock, convert it into a local vreg */
14249 for (i = 0; i < cfg->num_varinfo; i++) {
14250 MonoInst *var = cfg->varinfo [i];
14251 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14253 switch (var->type) {
14259 #if SIZEOF_REGISTER == 8
14262 #if !defined(TARGET_X86)
14263 /* Enabling this screws up the fp stack on x86 */
14266 if (mono_arch_is_soft_float ())
14270 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14274 /* Arguments are implicitly global */
14275 /* Putting R4 vars into registers doesn't work currently */
14276 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14277 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14279 * Make that the variable's liveness interval doesn't contain a call, since
14280 * that would cause the lvreg to be spilled, making the whole optimization
14283 /* This is too slow for JIT compilation */
14285 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14287 int def_index, call_index, ins_index;
14288 gboolean spilled = FALSE;
14293 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14294 const char *spec = INS_INFO (ins->opcode);
14296 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14297 def_index = ins_index;
/*
 * FIX: the second disjunct previously re-tested SRC1/sreg1 (copy-paste
 * error), so uses of the variable as the second source were never seen.
 */
14299 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14300 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
14301 if (call_index > def_index) {
14307 if (MONO_IS_CALL (ins))
14308 call_index = ins_index;
14318 if (G_UNLIKELY (cfg->verbose_level > 2))
14319 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14320 var->flags |= MONO_INST_IS_DEAD;
14321 cfg->vreg_to_inst [var->dreg] = NULL;
14328 * Compress the varinfo and vars tables so the liveness computation is faster and
14329 * takes up less space.
14332 for (i = 0; i < cfg->num_varinfo; ++i) {
14333 MonoInst *var = cfg->varinfo [i];
14334 if (pos < i && cfg->locals_start == i)
14335 cfg->locals_start = pos;
14336 if (!(var->flags & MONO_INST_IS_DEAD)) {
14338 cfg->varinfo [pos] = cfg->varinfo [i];
14339 cfg->varinfo [pos]->inst_c0 = pos;
14340 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14341 cfg->vars [pos].idx = pos;
14342 #if SIZEOF_REGISTER == 4
14343 if (cfg->varinfo [pos]->type == STACK_I8) {
14344 /* Modify the two component vars too */
14347 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14348 var1->inst_c0 = pos;
14349 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14350 var1->inst_c0 = pos;
14357 cfg->num_varinfo = pos;
14358 if (cfg->locals_start > cfg->num_varinfo)
14359 cfg->locals_start = cfg->num_varinfo;
14363 * mono_allocate_gsharedvt_vars:
14365 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14366 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14369 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* one slot index per vreg; 0 means "not a gsharedvt var" */
14373 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14375 for (i = 0; i < cfg->num_varinfo; ++i) {
14376 MonoInst *ins = cfg->varinfo [i];
14379 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* locals get an entry in the runtime-info table ... */
14380 if (i >= cfg->locals_start) {
14382 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* stored as idx + 1 so 0 can mean "unassigned" */
14383 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14384 ins->opcode = OP_GSHAREDVT_LOCAL;
14385 ins->inst_imm = idx;
/* ... while arguments are passed by ref and marked with -1 */
14388 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14389 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14396 * mono_spill_global_vars:
14398 * Generate spill code for variables which are not allocated to registers,
14399 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14400 * code is generated which could be optimized by the local optimization passes.
14403 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14405 MonoBasicBlock *bb;
14407 int orig_next_vreg;
14408 guint32 *vreg_to_lvreg;
14410 guint32 i, lvregs_len;
14411 gboolean dest_has_lvreg = FALSE;
14412 MonoStackType stacktypes [128];
14413 MonoInst **live_range_start, **live_range_end;
14414 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14416 *need_local_opts = FALSE;
14418 memset (spec2, 0, sizeof (spec2));
14420 /* FIXME: Move this function to mini.c */
14421 stacktypes ['i'] = STACK_PTR;
14422 stacktypes ['l'] = STACK_I8;
14423 stacktypes ['f'] = STACK_R8;
14424 #ifdef MONO_ARCH_SIMD_INTRINSICS
14425 stacktypes ['x'] = STACK_VTYPE;
14428 #if SIZEOF_REGISTER == 4
14429 /* Create MonoInsts for longs */
14430 for (i = 0; i < cfg->num_varinfo; i++) {
14431 MonoInst *ins = cfg->varinfo [i];
14433 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14434 switch (ins->type) {
14439 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14442 g_assert (ins->opcode == OP_REGOFFSET);
14444 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14446 tree->opcode = OP_REGOFFSET;
14447 tree->inst_basereg = ins->inst_basereg;
14448 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14450 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14452 tree->opcode = OP_REGOFFSET;
14453 tree->inst_basereg = ins->inst_basereg;
14454 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14464 if (cfg->compute_gc_maps) {
14465 /* registers need liveness info even for !non refs */
14466 for (i = 0; i < cfg->num_varinfo; i++) {
14467 MonoInst *ins = cfg->varinfo [i];
14469 if (ins->opcode == OP_REGVAR)
14470 ins->flags |= MONO_INST_GC_TRACK;
14474 /* FIXME: widening and truncation */
14477 * As an optimization, when a variable allocated to the stack is first loaded into
14478 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14479 * the variable again.
14481 orig_next_vreg = cfg->next_vreg;
14482 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14483 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14487 * These arrays contain the first and last instructions accessing a given
14489 * Since we emit bblocks in the same order we process them here, and we
14490 * don't split live ranges, these will precisely describe the live range of
14491 * the variable, i.e. the instruction range where a valid value can be found
14492 * in the variables location.
14493 * The live range is computed using the liveness info computed by the liveness pass.
14494 * We can't use vmv->range, since that is an abstract live range, and we need
14495 * one which is instruction precise.
14496 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14498 /* FIXME: Only do this if debugging info is requested */
14499 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14500 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14501 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14502 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14504 /* Add spill loads/stores */
14505 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14508 if (cfg->verbose_level > 2)
14509 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14511 /* Clear vreg_to_lvreg array */
14512 for (i = 0; i < lvregs_len; i++)
14513 vreg_to_lvreg [lvregs [i]] = 0;
14517 MONO_BB_FOR_EACH_INS (bb, ins) {
14518 const char *spec = INS_INFO (ins->opcode);
14519 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14520 gboolean store, no_lvreg;
14521 int sregs [MONO_MAX_SRC_REGS];
14523 if (G_UNLIKELY (cfg->verbose_level > 2))
14524 mono_print_ins (ins);
14526 if (ins->opcode == OP_NOP)
14530 * We handle LDADDR here as well, since it can only be decomposed
14531 * when variable addresses are known.
14533 if (ins->opcode == OP_LDADDR) {
14534 MonoInst *var = (MonoInst *)ins->inst_p0;
14536 if (var->opcode == OP_VTARG_ADDR) {
14537 /* Happens on SPARC/S390 where vtypes are passed by reference */
14538 MonoInst *vtaddr = var->inst_left;
14539 if (vtaddr->opcode == OP_REGVAR) {
14540 ins->opcode = OP_MOVE;
14541 ins->sreg1 = vtaddr->dreg;
14543 else if (var->inst_left->opcode == OP_REGOFFSET) {
14544 ins->opcode = OP_LOAD_MEMBASE;
14545 ins->inst_basereg = vtaddr->inst_basereg;
14546 ins->inst_offset = vtaddr->inst_offset;
14549 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14550 /* gsharedvt arg passed by ref */
14551 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14553 ins->opcode = OP_LOAD_MEMBASE;
14554 ins->inst_basereg = var->inst_basereg;
14555 ins->inst_offset = var->inst_offset;
14556 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14557 MonoInst *load, *load2, *load3;
14558 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14559 int reg1, reg2, reg3;
14560 MonoInst *info_var = cfg->gsharedvt_info_var;
14561 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14565 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14568 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14570 g_assert (info_var);
14571 g_assert (locals_var);
14573 /* Mark the instruction used to compute the locals var as used */
14574 cfg->gsharedvt_locals_var_ins = NULL;
14576 /* Load the offset */
14577 if (info_var->opcode == OP_REGOFFSET) {
14578 reg1 = alloc_ireg (cfg);
14579 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14580 } else if (info_var->opcode == OP_REGVAR) {
14582 reg1 = info_var->dreg;
14584 g_assert_not_reached ();
14586 reg2 = alloc_ireg (cfg);
14587 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14588 /* Load the locals area address */
14589 reg3 = alloc_ireg (cfg);
14590 if (locals_var->opcode == OP_REGOFFSET) {
14591 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14592 } else if (locals_var->opcode == OP_REGVAR) {
14593 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14595 g_assert_not_reached ();
14597 /* Compute the address */
14598 ins->opcode = OP_PADD;
14602 mono_bblock_insert_before_ins (bb, ins, load3);
14603 mono_bblock_insert_before_ins (bb, load3, load2);
14605 mono_bblock_insert_before_ins (bb, load2, load);
14607 g_assert (var->opcode == OP_REGOFFSET);
14609 ins->opcode = OP_ADD_IMM;
14610 ins->sreg1 = var->inst_basereg;
14611 ins->inst_imm = var->inst_offset;
14614 *need_local_opts = TRUE;
14615 spec = INS_INFO (ins->opcode);
14618 if (ins->opcode < MONO_CEE_LAST) {
14619 mono_print_ins (ins);
14620 g_assert_not_reached ();
14624 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14628 if (MONO_IS_STORE_MEMBASE (ins)) {
14629 tmp_reg = ins->dreg;
14630 ins->dreg = ins->sreg2;
14631 ins->sreg2 = tmp_reg;
14634 spec2 [MONO_INST_DEST] = ' ';
14635 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14636 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14637 spec2 [MONO_INST_SRC3] = ' ';
14639 } else if (MONO_IS_STORE_MEMINDEX (ins))
14640 g_assert_not_reached ();
14645 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14646 printf ("\t %.3s %d", spec, ins->dreg);
14647 num_sregs = mono_inst_get_src_registers (ins, sregs);
14648 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14649 printf (" %d", sregs [srcindex]);
14656 regtype = spec [MONO_INST_DEST];
14657 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14660 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14661 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14662 MonoInst *store_ins;
14664 MonoInst *def_ins = ins;
14665 int dreg = ins->dreg; /* The original vreg */
14667 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14669 if (var->opcode == OP_REGVAR) {
14670 ins->dreg = var->dreg;
14671 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14673 * Instead of emitting a load+store, use a _membase opcode.
14675 g_assert (var->opcode == OP_REGOFFSET);
14676 if (ins->opcode == OP_MOVE) {
14680 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14681 ins->inst_basereg = var->inst_basereg;
14682 ins->inst_offset = var->inst_offset;
14685 spec = INS_INFO (ins->opcode);
14689 g_assert (var->opcode == OP_REGOFFSET);
14691 prev_dreg = ins->dreg;
14693 /* Invalidate any previous lvreg for this vreg */
14694 vreg_to_lvreg [ins->dreg] = 0;
14698 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14700 store_opcode = OP_STOREI8_MEMBASE_REG;
14703 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14705 #if SIZEOF_REGISTER != 8
14706 if (regtype == 'l') {
14707 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14708 mono_bblock_insert_after_ins (bb, ins, store_ins);
14709 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14710 mono_bblock_insert_after_ins (bb, ins, store_ins);
14711 def_ins = store_ins;
14716 g_assert (store_opcode != OP_STOREV_MEMBASE);
14718 /* Try to fuse the store into the instruction itself */
14719 /* FIXME: Add more instructions */
14720 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14721 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14722 ins->inst_imm = ins->inst_c0;
14723 ins->inst_destbasereg = var->inst_basereg;
14724 ins->inst_offset = var->inst_offset;
14725 spec = INS_INFO (ins->opcode);
14726 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14727 ins->opcode = store_opcode;
14728 ins->inst_destbasereg = var->inst_basereg;
14729 ins->inst_offset = var->inst_offset;
14733 tmp_reg = ins->dreg;
14734 ins->dreg = ins->sreg2;
14735 ins->sreg2 = tmp_reg;
14738 spec2 [MONO_INST_DEST] = ' ';
14739 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14740 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14741 spec2 [MONO_INST_SRC3] = ' ';
14743 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14744 // FIXME: The backends expect the base reg to be in inst_basereg
14745 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14747 ins->inst_basereg = var->inst_basereg;
14748 ins->inst_offset = var->inst_offset;
14749 spec = INS_INFO (ins->opcode);
14751 /* printf ("INS: "); mono_print_ins (ins); */
14752 /* Create a store instruction */
14753 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14755 /* Insert it after the instruction */
14756 mono_bblock_insert_after_ins (bb, ins, store_ins);
14758 def_ins = store_ins;
14761 * We can't assign ins->dreg to var->dreg here, since the
14762 * sregs could use it. So set a flag, and do it after
14765 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14766 dest_has_lvreg = TRUE;
14771 if (def_ins && !live_range_start [dreg]) {
14772 live_range_start [dreg] = def_ins;
14773 live_range_start_bb [dreg] = bb;
14776 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14779 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14780 tmp->inst_c1 = dreg;
14781 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14788 num_sregs = mono_inst_get_src_registers (ins, sregs);
14789 for (srcindex = 0; srcindex < 3; ++srcindex) {
14790 regtype = spec [MONO_INST_SRC1 + srcindex];
14791 sreg = sregs [srcindex];
14793 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14794 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14795 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14796 MonoInst *use_ins = ins;
14797 MonoInst *load_ins;
14798 guint32 load_opcode;
14800 if (var->opcode == OP_REGVAR) {
14801 sregs [srcindex] = var->dreg;
14802 //mono_inst_set_src_registers (ins, sregs);
14803 live_range_end [sreg] = use_ins;
14804 live_range_end_bb [sreg] = bb;
14806 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14809 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14810 /* var->dreg is a hreg */
14811 tmp->inst_c1 = sreg;
14812 mono_bblock_insert_after_ins (bb, ins, tmp);
14818 g_assert (var->opcode == OP_REGOFFSET);
14820 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14822 g_assert (load_opcode != OP_LOADV_MEMBASE);
14824 if (vreg_to_lvreg [sreg]) {
14825 g_assert (vreg_to_lvreg [sreg] != -1);
14827 /* The variable is already loaded to an lvreg */
14828 if (G_UNLIKELY (cfg->verbose_level > 2))
14829 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14830 sregs [srcindex] = vreg_to_lvreg [sreg];
14831 //mono_inst_set_src_registers (ins, sregs);
14835 /* Try to fuse the load into the instruction */
14836 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14837 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14838 sregs [0] = var->inst_basereg;
14839 //mono_inst_set_src_registers (ins, sregs);
14840 ins->inst_offset = var->inst_offset;
14841 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14842 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14843 sregs [1] = var->inst_basereg;
14844 //mono_inst_set_src_registers (ins, sregs);
14845 ins->inst_offset = var->inst_offset;
14847 if (MONO_IS_REAL_MOVE (ins)) {
14848 ins->opcode = OP_NOP;
14851 //printf ("%d ", srcindex); mono_print_ins (ins);
14853 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14855 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14856 if (var->dreg == prev_dreg) {
14858 * sreg refers to the value loaded by the load
14859 * emitted below, but we need to use ins->dreg
14860 * since it refers to the store emitted earlier.
14864 g_assert (sreg != -1);
14865 vreg_to_lvreg [var->dreg] = sreg;
14866 g_assert (lvregs_len < 1024);
14867 lvregs [lvregs_len ++] = var->dreg;
14871 sregs [srcindex] = sreg;
14872 //mono_inst_set_src_registers (ins, sregs);
14874 #if SIZEOF_REGISTER != 8
14875 if (regtype == 'l') {
14876 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14877 mono_bblock_insert_before_ins (bb, ins, load_ins);
14878 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14879 mono_bblock_insert_before_ins (bb, ins, load_ins);
14880 use_ins = load_ins;
14885 #if SIZEOF_REGISTER == 4
14886 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14888 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14889 mono_bblock_insert_before_ins (bb, ins, load_ins);
14890 use_ins = load_ins;
14894 if (var->dreg < orig_next_vreg) {
14895 live_range_end [var->dreg] = use_ins;
14896 live_range_end_bb [var->dreg] = bb;
14899 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14902 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14903 tmp->inst_c1 = var->dreg;
14904 mono_bblock_insert_after_ins (bb, ins, tmp);
14908 mono_inst_set_src_registers (ins, sregs);
14910 if (dest_has_lvreg) {
14911 g_assert (ins->dreg != -1);
14912 vreg_to_lvreg [prev_dreg] = ins->dreg;
14913 g_assert (lvregs_len < 1024);
14914 lvregs [lvregs_len ++] = prev_dreg;
14915 dest_has_lvreg = FALSE;
14919 tmp_reg = ins->dreg;
14920 ins->dreg = ins->sreg2;
14921 ins->sreg2 = tmp_reg;
14924 if (MONO_IS_CALL (ins)) {
14925 /* Clear vreg_to_lvreg array */
14926 for (i = 0; i < lvregs_len; i++)
14927 vreg_to_lvreg [lvregs [i]] = 0;
14929 } else if (ins->opcode == OP_NOP) {
14931 MONO_INST_NULLIFY_SREGS (ins);
14934 if (cfg->verbose_level > 2)
14935 mono_print_ins_index (1, ins);
14938 /* Extend the live range based on the liveness info */
14939 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14940 for (i = 0; i < cfg->num_varinfo; i ++) {
14941 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14943 if (vreg_is_volatile (cfg, vi->vreg))
14944 /* The liveness info is incomplete */
14947 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14948 /* Live from at least the first ins of this bb */
14949 live_range_start [vi->vreg] = bb->code;
14950 live_range_start_bb [vi->vreg] = bb;
14953 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14954 /* Live at least until the last ins of this bb */
14955 live_range_end [vi->vreg] = bb->last_ins;
14956 live_range_end_bb [vi->vreg] = bb;
14963 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14964 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14966 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14967 for (i = 0; i < cfg->num_varinfo; ++i) {
14968 int vreg = MONO_VARINFO (cfg, i)->vreg;
14971 if (live_range_start [vreg]) {
14972 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14974 ins->inst_c1 = vreg;
14975 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14977 if (live_range_end [vreg]) {
14978 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14980 ins->inst_c1 = vreg;
14981 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14982 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14984 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14989 if (cfg->gsharedvt_locals_var_ins) {
14990 /* Nullify if unused */
14991 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14992 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14995 g_free (live_range_start);
14996 g_free (live_range_end);
14997 g_free (live_range_start_bb);
14998 g_free (live_range_end_bb);
15003 * - use 'iadd' instead of 'int_add'
15004 * - handling ovf opcodes: decompose in method_to_ir.
15005 * - unify iregs/fregs
15006 * -> partly done, the missing parts are:
15007 * - a more complete unification would involve unifying the hregs as well, so
15008 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15009 * would no longer map to the machine hregs, so the code generators would need to
15010 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15011 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15012 * fp/non-fp branches speeds it up by about 15%.
15013 * - use sext/zext opcodes instead of shifts
15015 * - get rid of TEMPLOADs if possible and use vregs instead
15016 * - clean up usage of OP_P/OP_ opcodes
15017 * - cleanup usage of DUMMY_USE
15018 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15020 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15021 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15022 * - make sure handle_stack_args () is called before the branch is emitted
15023 * - when the new IR is done, get rid of all unused stuff
15024 * - COMPARE/BEQ as separate instructions or unify them ?
15025 * - keeping them separate allows specialized compare instructions like
15026 * compare_imm, compare_membase
15027 * - most back ends unify fp compare+branch, fp compare+ceq
15028 * - integrate mono_save_args into inline_method
15029 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
15030 * - handle long shift opts on 32 bit platforms somehow: they require
15031 * 3 sregs (2 for arg1 and 1 for arg2)
15032 * - make byref a 'normal' type.
15033 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15034 * variable if needed.
15035 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15036 * like inline_method.
15037 * - remove inlining restrictions
15038 * - fix LNEG and enable cfold of INEG
15039 * - generalize x86 optimizations like ldelema as a peephole optimization
15040 * - add store_mem_imm for amd64
15041 * - optimize the loading of the interruption flag in the managed->native wrappers
15042 * - avoid special handling of OP_NOP in passes
15043 * - move code inserting instructions into one function/macro.
15044 * - try a coalescing phase after liveness analysis
15045 * - add float -> vreg conversion + local optimizations on !x86
15046 * - figure out how to handle decomposed branches during optimizations, ie.
15047 * compare+branch, op_jump_table+op_br etc.
15048 * - promote RuntimeXHandles to vregs
15049 * - vtype cleanups:
15050 * - add a NEW_VARLOADA_VREG macro
15051 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15052 * accessing vtype fields.
15053 * - get rid of I8CONST on 64 bit platforms
15054 * - dealing with the increase in code size due to branches created during opcode
15056 * - use extended basic blocks
15057 * - all parts of the JIT
15058 * - handle_global_vregs () && local regalloc
15059 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15060 * - sources of increase in code size:
15063 * - isinst and castclass
15064 * - lvregs not allocated to global registers even if used multiple times
15065 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15067 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15068 * - add all micro optimizations from the old JIT
15069 * - put tree optimizations into the deadce pass
15070 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15071 * specific function.
15072 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15073 * fcompare + branchCC.
15074 * - create a helper function for allocating a stack slot, taking into account
15075 * MONO_CFG_HAS_SPILLUP.
15077 * - merge the ia64 switch changes.
15078 * - optimize mono_regstate2_alloc_int/float.
15079 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15080 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15081 * parts of the tree could be separated by other instructions, killing the tree
15082 * arguments, or stores killing loads etc. Also, should we fold loads into other
15083 * instructions if the result of the load is used multiple times ?
15084 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15085 * - LAST MERGE: 108395.
15086 * - when returning vtypes in registers, generate IR and append it to the end of the
15087 * last bb instead of doing it in the epilog.
15088 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15096 - When to decompose opcodes:
15097 - earlier: this makes some optimizations hard to implement, since the low level IR
15098 no longer contains the necessary information. But it is easier to do.
15099 - later: harder to implement, enables more optimizations.
15100 - Branches inside bblocks:
15101 - created when decomposing complex opcodes.
15102 - branches to another bblock: harmless, but not tracked by the branch
15103 optimizations, so need to branch to a label at the start of the bblock.
15104 - branches to inside the same bblock: very problematic, trips up the local
15105 reg allocator. Can be fixed by splitting the current bblock, but that is a
15106 complex operation, since some local vregs can become global vregs etc.
15107 - Local/global vregs:
15108 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15109 local register allocator.
15110 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15111 structure, created by mono_create_var (). Assigned to hregs or the stack by
15112 the global register allocator.
15113 - When to do optimizations like alu->alu_imm:
15114 - earlier -> saves work later on since the IR will be smaller/simpler
15115 - later -> can work on more instructions
15116 - Handling of valuetypes:
15117 - When a vtype is pushed on the stack, a new temporary is created, an
15118 instruction computing its address (LDADDR) is emitted and pushed on
15119 the stack. Need to optimize cases when the vtype is used immediately as in
15120 argument passing, stloc etc.
15121 - Instead of the to_end stuff in the old JIT, simply call the function handling
15122 the values on the stack before emitting the last instruction of the bb.
15125 #endif /* DISABLE_JIT */