2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
166 * Instruction metadata
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
206 #define MONO_INIT_VARINFO(vi,id) do { \
207 (vi)->range.first_use.pos.bid = 0xffff; \
213 mono_alloc_ireg (MonoCompile *cfg)
215 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh long (64-bit) virtual register. */
219 mono_alloc_lreg (MonoCompile *cfg)
221 return alloc_lreg (cfg);
/* Public wrapper: allocate a fresh floating point virtual register. */
225 mono_alloc_freg (MonoCompile *cfg)
227 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized virtual register. */
231 mono_alloc_preg (MonoCompile *cfg)
233 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg matching STACK_TYPE. */
237 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
239 return alloc_dreg (cfg, stack_type);
243 * mono_alloc_ireg_ref:
245 * Allocate an IREG, and mark it as holding a GC ref.
248 mono_alloc_ireg_ref (MonoCompile *cfg)
250 return alloc_ireg_ref (cfg);
254 * mono_alloc_ireg_mp:
256 * Allocate an IREG, and mark it as holding a managed pointer.
259 mono_alloc_ireg_mp (MonoCompile *cfg)
261 return alloc_ireg_mp (cfg);
265 * mono_alloc_ireg_copy:
267 * Allocate an IREG with the same GC type as VREG.
270 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Preserve the GC-tracking category of the source vreg (GC ref /
   managed pointer / plain integer) when picking the new register. */
272 if (vreg_is_ref (cfg, vreg))
273 return alloc_ireg_ref (cfg);
274 else if (vreg_is_mp (cfg, vreg))
275 return alloc_ireg_mp (cfg);
277 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Return the move opcode (OP_MOVE / OP_FMOVE / OP_RMOVE / ...) used to copy
 *   a value of TYPE between virtual registers.
 *   NOTE(review): this extraction is missing interleaved lines, so several
 *   case labels and return statements of the switch are not visible here.
 */
281 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
286 type = mini_get_underlying_type (type);
288 switch (type->type) {
301 case MONO_TYPE_FNPTR:
303 case MONO_TYPE_CLASS:
304 case MONO_TYPE_STRING:
305 case MONO_TYPE_OBJECT:
306 case MONO_TYPE_SZARRAY:
307 case MONO_TYPE_ARRAY:
311 #if SIZEOF_REGISTER == 8
/* With r4fp, R4 values live in dedicated single-precision regs -> RMOVE. */
317 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
320 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying integral type. */
321 if (type->data.klass->enumtype) {
322 type = mono_class_enum_basetype (type->data.klass);
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 case MONO_TYPE_TYPEDBYREF:
330 case MONO_TYPE_GENERICINST:
331 type = &type->data.generic_class->container_class->byval_arg;
335 g_assert (cfg->gshared);
336 if (mini_type_var_is_vt (type))
/* Generic type variables: recurse on the underlying type. */
339 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
341 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: print MSG, the block number, the basic block's in/out
 *   CFG edges and every instruction it contains to stdout.
 */
347 mono_print_bb (MonoBasicBlock *bb, const char *msg)
352 printf ("\n%s %d: [IN: ", msg, bb->block_num);
353 for (i = 0; i < bb->in_count; ++i)
354 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
356 for (i = 0; i < bb->out_count; ++i)
357 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump the instruction list of the block. */
359 for (tree = bb->code; tree; tree = tree->next)
360 mono_print_ins_index (-1, tree);
/* One-time creation of the call signatures used when emitting calls to the
   JIT helper functions (see the helper_sig_* globals above). */
364 mono_create_helper_signatures (void)
366 helper_sig_domain_get = mono_create_icall_signature ("ptr");
367 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
368 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/* Debugging hook: when the 'break-on-unverified' debug option is set, stop
   in the debugger at the exact point invalid IL is detected.
   MONO_NEVER_INLINE keeps this as a distinct, breakpointable frame. */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/* Record a System.MethodAccessException in cfg->error: CIL_METHOD is not
   accessible from METHOD. Backing helper for the METHOD_ACCESS_FAILURE macro. */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
384 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
/* The formatted message is owned by cfg->error; the temporary name
   strings can be released here. */
385 g_free (method_fname);
386 g_free (cil_method_fname);
/* Record a System.FieldAccessException in cfg->error: FIELD is not
   accessible from METHOD. Backing helper for the FIELD_ACCESS_FAILURE macro. */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
395 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/* Flag the compilation as a failed inline attempt (MSG is a human-readable
   reason, printed only in verbose mode). Backing helper for INLINE_FAILURE. */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt compilation failure: stash a detailed message (method,
   opcode, JIT source location) in cfg->exception_message and flag the cfg so
   compilation falls back. Backing helper for the GSHAREDVT_FAILURE macro. */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
/* Case 1: one operand is R4 and the other R8 -> widen the R4 side to R8. */
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
/* Case 2 (64-bit only): native-int paired with I4 -> sign-extend the I4
   operand and rewire INS's second source to the widened vreg. */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins); \
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, cfg->cbb, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, cfg->cbb, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (cfg->cbb, cmp); \
559 MONO_ADD_INS (cfg->cbb, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
/* Verbose tracing: describe the edge being added (entry/exit blocks have
   no cil_code, hence the four message variants). */
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* The edge is added only once: bail out if TO is already an out-edge. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot; mempool allocations cannot be resized
   in place, so allocate a new array and copy the old entries. */
597 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Symmetric update of the in-edge list on TO. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock () above. */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
/* First pass: check whether OFFSET falls inside a filter expression or a
   handler body; the region token is (clause index + 1) << 8 combined with
   the region kind and the clause flags. */
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: OFFSET is not in any handler; check the protected (try)
   ranges themselves. */
666 for (i = 0; i < header->num_clauses; ++i) {
667 clause = &header->clauses [i];
669 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
670 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (into a GList) the clauses of kind TYPE whose protected range
 *   contains IP but not TARGET — presumably the handlers that must execute
 *   when control leaves ip towards target (confirm against callers; parts
 *   of this function are not visible in this extraction).
 */
677 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
679 MonoMethodHeader *header = cfg->header;
680 MonoExceptionClause *clause;
684 for (i = 0; i < header->num_clauses; ++i) {
685 clause = &header->clauses [i];
/* Clause covers IP but not TARGET -> the branch exits this clause. */
686 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
687 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
688 if (clause->flags == type)
689 res = g_list_append (res, clause);
/* Get or lazily create the 'spvar' local associated with REGION, cached in
   the cfg->spvars hash table keyed by region id. */
696 mono_create_spvar_for_region (MonoCompile *cfg, int region)
700 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
704 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
705 /* prevent it from being register allocated */
706 var->flags |= MONO_INST_VOLATILE;
708 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable created for the handler at OFFSET;
   returns NULL if none has been created yet. */
712 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
714 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the object-typed local that holds the exception
   object for the handler at OFFSET, cached in cfg->exvars. */
718 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
722 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
726 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
727 /* prevent it from being register allocated */
728 var->flags |= MONO_INST_VOLATILE;
730 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
736 * Returns the type used in the eval stack when @type is loaded.
737 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
740 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
744 type = mini_get_underlying_type (type);
745 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
747 inst->type = STACK_MP;
752 switch (type->type) {
754 inst->type = STACK_INV;
762 inst->type = STACK_I4;
767 case MONO_TYPE_FNPTR:
768 inst->type = STACK_PTR;
770 case MONO_TYPE_CLASS:
771 case MONO_TYPE_STRING:
772 case MONO_TYPE_OBJECT:
773 case MONO_TYPE_SZARRAY:
774 case MONO_TYPE_ARRAY:
775 inst->type = STACK_OBJ;
779 inst->type = STACK_I8;
/* R4 is STACK_R4 or STACK_R8 depending on the backend (cfg->r4_stack_type). */
782 inst->type = cfg->r4_stack_type;
785 inst->type = STACK_R8;
787 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
788 if (type->data.klass->enumtype) {
789 type = mono_class_enum_basetype (type->data.klass);
793 inst->type = STACK_VTYPE;
796 case MONO_TYPE_TYPEDBYREF:
797 inst->klass = mono_defaults.typed_reference_class;
798 inst->type = STACK_VTYPE;
800 case MONO_TYPE_GENERICINST:
801 type = &type->data.generic_class->container_class->byval_arg;
/* Generic type variables: only valid under gshared; gsharedvt types stay
   as value types, everything else recurses on the underlying type. */
805 g_assert (cfg->gshared);
806 if (mini_is_gsharedvt_type (type)) {
807 g_assert (cfg->gsharedvt);
808 inst->type = STACK_VTYPE;
810 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
814 g_error ("unknown type 0x%02x in eval stack type", type->type);
819 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [lhs type][rhs type];
   STACK_INV marks invalid IL combinations. */
822 bin_num_table [STACK_MAX] [STACK_MAX] = {
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
828 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of neg/not, indexed by the single operand's stack type. */
836 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
839 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
841 bin_int_table [STACK_MAX] [STACK_MAX] = {
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility matrix; non-zero means the pair is comparable
   (the specific non-zero codes distinguish verifiability classes). */
853 bin_comp_table [STACK_MAX] [STACK_MAX] = {
854 /* Inv i L p F & O vt r4 */
856 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
857 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
858 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
859 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
860 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
861 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
862 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
863 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
866 /* reduce the size of this table */
/* Result type of shift operations: the shifted operand decides the type. */
868 shift_table [STACK_MAX] [STACK_MAX] = {
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
880 * Tables to map from the non-specific opcode to the matching
881 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CEE_/OP_ opcode,
   indexed by the operand's stack type. */
883 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
885 binops_op_map [STACK_MAX] = {
886 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
889 /* handles from CEE_NEG to CEE_CONV_U8 */
891 unops_op_map [STACK_MAX] = {
892 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
895 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
897 ovfops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
901 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
903 ovf2ops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
907 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
909 ovf3ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
913 /* handles from CEE_BEQ to CEE_BLT_UN */
915 beqops_op_map [STACK_MAX] = {
916 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
919 /* handles from CEE_CEQ to CEE_CLT_UN */
921 ceqops_op_map [STACK_MAX] = {
922 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
926 * Sets ins->type (the type on the eval stack) according to the
927 * type of the opcode and the arguments to it.
928 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
930 * FIXME: this function sets ins->type unconditionally in some cases, but
931 * it should set it to invalid for some types (a conv.x on an object)
934 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
936 switch (ins->opcode) {
/* Binary numeric ops: look up the result type, then specialize the generic
   opcode via the per-type delta tables (binops_op_map etc.). */
943 /* FIXME: check unverifiable args for STACK_MP */
944 ins->type = bin_num_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
952 ins->type = bin_int_table [src1->type] [src2->type];
953 ins->opcode += binops_op_map [ins->type];
958 ins->type = shift_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* Compares: choose L/R/F/I variant from the operand type; on 64-bit,
   pointer-sized and reference operands use the long compare. */
964 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
965 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
966 ins->opcode = OP_LCOMPARE;
967 else if (src1->type == STACK_R4)
968 ins->opcode = OP_RCOMPARE;
969 else if (src1->type == STACK_R8)
970 ins->opcode = OP_FCOMPARE;
972 ins->opcode = OP_ICOMPARE;
974 case OP_ICOMPARE_IMM:
/* Immediate compare has a single source, hence src1 is used for both
   table indices on purpose. */
975 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
976 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
977 ins->opcode = OP_LCOMPARE_IMM;
989 ins->opcode += beqops_op_map [src1->type];
992 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
993 ins->opcode += ceqops_op_map [src1->type];
999 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1000 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
1004 ins->type = neg_table [src1->type];
1005 ins->opcode += unops_op_map [ins->type];
1008 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1009 ins->type = src1->type;
1011 ins->type = STACK_INV;
1012 ins->opcode += unops_op_map [ins->type];
/* Conversions: each group fixes the result stack type, then specializes
   the opcode by the SOURCE operand's type. */
1018 ins->type = STACK_I4;
1019 ins->opcode += unops_op_map [src1->type];
1022 ins->type = STACK_R8;
1023 switch (src1->type) {
1026 ins->opcode = OP_ICONV_TO_R_UN;
1029 ins->opcode = OP_LCONV_TO_R_UN;
1033 case CEE_CONV_OVF_I1:
1034 case CEE_CONV_OVF_U1:
1035 case CEE_CONV_OVF_I2:
1036 case CEE_CONV_OVF_U2:
1037 case CEE_CONV_OVF_I4:
1038 case CEE_CONV_OVF_U4:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf3ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I_UN:
1043 case CEE_CONV_OVF_U_UN:
1044 ins->type = STACK_PTR;
1045 ins->opcode += ovf2ops_op_map [src1->type];
1047 case CEE_CONV_OVF_I1_UN:
1048 case CEE_CONV_OVF_I2_UN:
1049 case CEE_CONV_OVF_I4_UN:
1050 case CEE_CONV_OVF_U1_UN:
1051 case CEE_CONV_OVF_U2_UN:
1052 case CEE_CONV_OVF_U4_UN:
1053 ins->type = STACK_I4;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1057 ins->type = STACK_PTR;
1058 switch (src1->type) {
1060 ins->opcode = OP_ICONV_TO_U;
/* On 64-bit, pointer-sized sources need the long conversion; on 32-bit
   a plain move suffices. */
1064 #if SIZEOF_VOID_P == 8
1065 ins->opcode = OP_LCONV_TO_U;
1067 ins->opcode = OP_MOVE;
1071 ins->opcode = OP_LCONV_TO_U;
1074 ins->opcode = OP_FCONV_TO_U;
1080 ins->type = STACK_I8;
1081 ins->opcode += unops_op_map [src1->type];
1083 case CEE_CONV_OVF_I8:
1084 case CEE_CONV_OVF_U8:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf3ops_op_map [src1->type];
1088 case CEE_CONV_OVF_U8_UN:
1089 case CEE_CONV_OVF_I8_UN:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf2ops_op_map [src1->type];
1094 ins->type = cfg->r4_stack_type;
1095 ins->opcode += unops_op_map [src1->type];
1098 ins->type = STACK_R8;
1099 ins->opcode += unops_op_map [src1->type];
1102 ins->type = STACK_R8;
1106 ins->type = STACK_I4;
1107 ins->opcode += ovfops_op_map [src1->type];
1110 case CEE_CONV_OVF_I:
1111 case CEE_CONV_OVF_U:
1112 ins->type = STACK_PTR;
1113 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floating point results. */
1116 case CEE_ADD_OVF_UN:
1118 case CEE_MUL_OVF_UN:
1120 case CEE_SUB_OVF_UN:
1121 ins->type = bin_num_table [src1->type] [src2->type];
1122 ins->opcode += ovfops_op_map [src1->type];
1123 if (ins->type == STACK_R8)
1124 ins->type = STACK_INV;
1126 case OP_LOAD_MEMBASE:
1127 ins->type = STACK_PTR;
1129 case OP_LOADI1_MEMBASE:
1130 case OP_LOADU1_MEMBASE:
1131 case OP_LOADI2_MEMBASE:
1132 case OP_LOADU2_MEMBASE:
1133 case OP_LOADI4_MEMBASE:
1134 case OP_LOADU4_MEMBASE:
1135 ins->type = STACK_PTR;
1137 case OP_LOADI8_MEMBASE:
1138 ins->type = STACK_I8;
1140 case OP_LOADR4_MEMBASE:
1141 ins->type = cfg->r4_stack_type;
1143 case OP_LOADR8_MEMBASE:
1144 ins->type = STACK_R8;
1147 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a generic klass on the eval stack. */
1151 if (ins->type == STACK_MP)
1152 ins->klass = mono_defaults.object_class;
/* Row of a MonoType->stack-type mapping table (its declaration line is not
   visible in this extraction). */
1157 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1163 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the ARGS on the eval stack are compatible with SIG
   (byref-ness and reference/float kinds are checked per parameter). */
1168 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1173 switch (args->type) {
1183 for (i = 0; i < sig->param_count; ++i) {
1184 switch (args [i].type) {
1188 if (!sig->params [i]->byref)
1192 if (sig->params [i]->byref)
1194 switch (sig->params [i]->type) {
1195 case MONO_TYPE_CLASS:
1196 case MONO_TYPE_STRING:
1197 case MONO_TYPE_OBJECT:
1198 case MONO_TYPE_SZARRAY:
1199 case MONO_TYPE_ARRAY:
/* Float args must go to an R4/R8, non-byref parameter. */
1206 if (sig->params [i]->byref)
1208 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1217 /*if (!param_table [args [i].type] [sig->params [i]->type])
1225 * When we need a pointer to the current domain many times in a method, we
1226 * call mono_domain_get() once and we store the result in a local variable.
1227 * This function returns the variable that represents the MonoDomain*.
1229 inline static MonoInst *
1230 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created; subsequent callers share the same local. */
1232 if (!cfg->domainvar)
1233 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1234 return cfg->domainvar;
1238 * The got_var contains the address of the Global Offset Table when AOT
/* Returns NULL when no GOT variable is needed (non-AOT, or the backend
   does not require one); otherwise lazily creates and caches it. */
1242 mono_get_got_var (MonoCompile *cfg)
1244 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1246 if (!cfg->got_var) {
1247 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 return cfg->got_var;
/* Get or lazily create the rgctx variable used by generic sharing; only
   valid on gshared compilations. */
1253 mono_get_vtable_var (MonoCompile *cfg)
1255 g_assert (cfg->gshared);
1257 if (!cfg->rgctx_var) {
1258 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 /* force the var to be stack allocated */
1260 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1263 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a MonoType*; STACK_MP and
   STACK_VTYPE use the klass recorded on the instruction. */
1267 type_from_stack_type (MonoInst *ins) {
1268 switch (ins->type) {
1269 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1270 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1271 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1272 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1273 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1275 return &ins->klass->this_arg;
1276 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1277 case STACK_VTYPE: return &ins->klass->byval_arg;
1279 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType* T to the evaluation-stack type (STACK_*) used by the JIT.
 * T is first reduced to its underlying type (enums -> base type, etc.).
 * Generic instances that are value types fall through to the vtype case.
 */
1284 static G_GNUC_UNUSED int
1285 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1287 t = mono_type_get_underlying_type (t);
1299 case MONO_TYPE_FNPTR:
1301 case MONO_TYPE_CLASS:
1302 case MONO_TYPE_STRING:
1303 case MONO_TYPE_OBJECT:
1304 case MONO_TYPE_SZARRAY:
1305 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type since float32 handling is configurable */
1311 return cfg->r4_stack_type;
1314 case MONO_TYPE_VALUETYPE:
1315 case MONO_TYPE_TYPEDBYREF:
1317 case MONO_TYPE_GENERICINST:
1318 if (mono_type_generic_inst_is_valuetype (t))
1324 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element MonoClass accessed by a CIL ldelem/stelem-style
 * OPCODE (e.g. CEE_LDELEM_REF -> object_class). Unknown opcodes assert.
 */
1331 array_access_to_klass (int opcode)
1335 return mono_defaults.byte_class;
1337 return mono_defaults.uint16_class;
1340 return mono_defaults.int_class;
1343 return mono_defaults.sbyte_class;
1346 return mono_defaults.int16_class;
1349 return mono_defaults.int32_class;
1351 return mono_defaults.uint32_class;
1354 return mono_defaults.int64_class;
1357 return mono_defaults.single_class;
1360 return mono_defaults.double_class;
1361 case CEE_LDELEM_REF:
1362 case CEE_STELEM_REF:
1363 return mono_defaults.object_class;
1365 g_assert_not_reached ();
1371 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable to hold the value in stack SLOT with the stack
 * type of INS, reusing a previously created one (cached in cfg->intvars,
 * keyed by (stack type, slot)) when the type matches.
 */
1374 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1379 /* inlining can result in deeper stacks */
1380 if (slot >= cfg->header->max_stack)
1381 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache key: stack types are 1-based, hence the -1 */
1383 pos = ins->type - 1 + slot * STACK_MAX;
1385 switch (ins->type) {
1392 if ((vnum = cfg->intvars [pos]))
1393 return cfg->varinfo [vnum];
1394 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1395 cfg->intvars [pos] = res->inst_c0;
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Remember the (image, token) pair which produced KEY so the AOT compiler
 * can later emit a token reference instead of a direct pointer.
 */
1404 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1407 * Don't use this if a generic_context is set, since that means AOT can't
1408 * look up the method using just the image+token.
1409 * table == 0 means this is a reference made from a wrapper.
1411 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1412 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1413 jump_info_token->image = image;
1414 jump_info_token->token = token;
1415 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1420 * This function is called to handle items that are left on the evaluation stack
1421 * at basic block boundaries. What happens is that we save the values to local variables
1422 * and we reload them later when first entering the target basic block (with the
1423 * handle_loaded_temps () function).
1424 * A single join point will use the same variables (stored in the array bb->out_stack or
1425 * bb->in_stack, if the basic block is before or after the join point).
1427 * This function needs to be called _before_ emitting the last instruction of
1428 * the bb (i.e. before emitting a branch).
1429 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT values in SP (left on the eval stack at a bblock
 * boundary) into shared locals so successor bblocks can reload them.
 * See the comment block above for the full contract; sets
 * cfg->unverifiable on a stack-merge mismatch at a join point.
 */
1432 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1435 MonoBasicBlock *bb = cfg->cbb;
1436 MonoBasicBlock *outb;
1437 MonoInst *inst, **locals;
1442 if (cfg->verbose_level > 3)
1443 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit: decide which locals represent this bblock's out-stack */
1444 if (!bb->out_scount) {
1445 bb->out_scount = count;
1446 //printf ("bblock %d has out:", bb->block_num);
/* Reuse an in_stack already assigned to one of the successors, if any */
1448 for (i = 0; i < bb->out_count; ++i) {
1449 outb = bb->out_bb [i];
1450 /* exception handlers are linked, but they should not be considered for stack args */
1451 if (outb->flags & BB_EXCEPTION_HANDLER)
1453 //printf (" %d", outb->block_num);
1454 if (outb->in_stack) {
1456 bb->out_stack = outb->in_stack;
1462 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1463 for (i = 0; i < count; ++i) {
1465 * try to reuse temps already allocated for this purpose, if they occupy the same
1466 * stack slot and if they are of the same type.
1467 * This won't cause conflicts since if 'local' is used to
1468 * store one of the values in the in_stack of a bblock, then
1469 * the same variable will be used for the same outgoing stack
1471 * This doesn't work when inlining methods, since the bblocks
1472 * in the inlined methods do not inherit their in_stack from
1473 * the bblock they are inlined to. See bug #58863 for an
1476 if (cfg->inlined_method)
1477 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1479 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out-stack as the in-stack of every successor */
1484 for (i = 0; i < bb->out_count; ++i) {
1485 outb = bb->out_bb [i];
1486 /* exception handlers are linked, but they should not be considered for stack args */
1487 if (outb->flags & BB_EXCEPTION_HANDLER)
1489 if (outb->in_scount) {
1490 if (outb->in_scount != bb->out_scount) {
1491 cfg->unverifiable = TRUE;
1494 continue; /* check they are the same locals */
1496 outb->in_scount = count;
1497 outb->in_stack = bb->out_stack;
1500 locals = bb->out_stack;
/* Emit the actual stores of the stack values into the shared locals */
1502 for (i = 0; i < count; ++i) {
1503 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1504 inst->cil_code = sp [i]->cil_code;
1505 sp [i] = locals [i];
1506 if (cfg->verbose_level > 3)
1507 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1511 * It is possible that the out bblocks already have in_stack assigned, and
1512 * the in_stacks differ. In this case, we will store to all the different
1519 /* Find a bblock which has a different in_stack */
1521 while (bindex < bb->out_count) {
1522 outb = bb->out_bb [bindex];
1523 /* exception handlers are linked, but they should not be considered for stack args */
1524 if (outb->flags & BB_EXCEPTION_HANDLER) {
1528 if (outb->in_stack != locals) {
1529 for (i = 0; i < count; ++i) {
1530 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1531 inst->cil_code = sp [i]->cil_code;
1532 sp [i] = locals [i];
1533 if (cfg->verbose_level > 3)
1534 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1536 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *
 *   Emit an instruction producing the runtime value described by
 * (PATCH_TYPE, DATA): an AOT constant when AOT-compiling, otherwise the
 * patch target is resolved eagerly and emitted as a pointer constant.
 */
1546 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1550 if (cfg->compile_aot) {
1551 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1557 ji.type = patch_type;
1558 ji.data.target = data;
1559 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1560 mono_error_assert_ok (&error);
1562 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap located at [BASE_REG + OFFSET] has the bit for KLASS's interface
 * id set. With COMPRESSED_INTERFACE_BITMAP the test is done via the
 * mono_class_interface_match icall; otherwise the bit is tested inline
 * (iid computed at runtime under AOT, constant-folded otherwise).
 */
1568 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1570 int ibitmap_reg = alloc_preg (cfg);
1571 #ifdef COMPRESSED_INTERFACE_BITMAP
1573 MonoInst *res, *ins;
1574 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1575 MONO_ADD_INS (cfg->cbb, ins);
1577 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1578 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1581 int ibitmap_byte_reg = alloc_preg (cfg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1585 if (cfg->compile_aot) {
/* iid is only known at runtime under AOT: compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers */
1586 int iid_reg = alloc_preg (cfg);
1587 int shifted_iid_reg = alloc_preg (cfg);
1588 int ibitmap_byte_address_reg = alloc_preg (cfg);
1589 int masked_iid_reg = alloc_preg (cfg);
1590 int iid_one_bit_reg = alloc_preg (cfg);
1591 int iid_bit_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1597 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: interface_id is a compile-time constant, fold the byte/bit math */
1601 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1608 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1609 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass::interface_bitmap */
1612 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1614 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1618 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1619 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable::interface_bitmap */
1622 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1624 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1628 * Emit code which checks whether the interface id of @klass is smaller
1629 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Emit IR which checks KLASS's interface id against MAX_IID_REG.
 * On failure, branch to FALSE_TARGET if given, otherwise throw
 * InvalidCastException. Under AOT the iid is loaded at runtime.
 */
1632 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 if (cfg->compile_aot) {
1636 int iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1648 /* Same as above, but obtains max_iid from a vtable */
1650 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1651 MonoBasicBlock *false_target)
1653 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable::max_interface_id, then delegate the actual comparison */
1655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1656 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1659 /* Same as above, but obtains max_iid from a klass */
1661 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1662 MonoBasicBlock *false_target)
1664 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass::max_interface_id, then delegate the actual comparison */
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1667 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test of the class in KLASS_REG against
 * KLASS using the supertypes table: compare supertypes[idepth-1] against
 * KLASS (given either as KLASS_INS->dreg, an AOT class constant, or an
 * immediate). Branch to TRUE_TARGET on match; FALSE_TARGET is used when
 * the idepth pre-check fails.
 */
1671 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1673 int idepth_reg = alloc_preg (cfg);
1674 int stypes_reg = alloc_preg (cfg);
1675 int stype = alloc_preg (cfg);
1677 mono_class_setup_supertypes (klass);
/* Only check idepth when it can exceed the fixed-size supertable */
1679 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1687 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1688 } else if (cfg->compile_aot) {
1689 int const_reg = alloc_preg (cfg);
1690 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction */
1699 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1701 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in VTABLE_REG:
 * max-iid range check, then interface-bitmap test. Branch to TRUE_TARGET
 * on success if given, otherwise throw InvalidCastException on failure.
 */
1705 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_reg = alloc_preg (cfg);
1709 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1719 * Variant of the above that takes a register to the class, not the vtable.
1722 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1724 int intf_bit_reg = alloc_preg (cfg);
1726 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1727 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit -> implements the interface; otherwise raise on fallthrough */
1730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1732 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS
 * (from KLASS_INST->dreg when given, otherwise a runtime class constant),
 * throwing InvalidCastException on mismatch.
 */
1736 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1744 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass instruction */
1748 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1750 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS (class constant under AOT, immediate
 * otherwise) and branch to TARGET using BRANCH_OP.
 */
1754 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1756 if (cfg->compile_aot) {
1757 int const_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1767 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Array classes are handled by
 * comparing rank and then recursively checking the element (cast) class,
 * with special cases for enums and System.Object elements; non-array
 * classes use the supertypes table. OBJ_REG == -1 skips the vector
 * (bounds == NULL) check for nested array element checks.
 */
1770 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1773 int rank_reg = alloc_preg (cfg);
1774 int eclass_reg = alloc_preg (cfg);
1776 g_assert (!klass_inst);
1777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1780 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum arrays whose basetype is object-compatible */
1782 if (klass->cast_class == mono_defaults.object_class) {
1783 int parent_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1785 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1786 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1787 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1788 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1789 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1790 } else if (klass->cast_class == mono_defaults.enum_class) {
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1793 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1795 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1796 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1799 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1800 /* Check that the object is a vector too */
1801 int bounds_reg = alloc_preg (cfg);
1802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1804 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check */
1807 int idepth_reg = alloc_preg (cfg);
1808 int stypes_reg = alloc_preg (cfg);
1809 int stype = alloc_preg (cfg);
1811 mono_class_setup_supertypes (klass);
1813 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1816 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1820 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without an explicit klass instruction */
1825 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1827 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR which zeroes SIZE bytes at [DESTREG + OFFSET].
 * Only VAL == 0 is supported. Small, suitably-aligned sizes use a single
 * immediate store; otherwise a zeroed register is stored in descending
 * power-of-two chunks (widest stores first, then narrower remainders).
 */
1831 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1835 g_assert (val == 0);
1840 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1849 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1851 #if SIZEOF_REGISTER == 8
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1859 val_reg = alloc_preg (cfg);
1861 if (SIZEOF_REGISTER == 8)
1862 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1864 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1867 /* This could be optimized further if necessary */
/* Unaligned prefix: byte stores until aligned */
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1876 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remainder: 4-, 2-, then 1-byte stores */
1890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], using the widest load/store pairs the alignment
 * and backend allow, then narrower remainders. SIZE is bounded by an
 * assert to avoid unbounded code expansion.
 */
1907 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1914 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1915 g_assert (size < 10000);
1918 /* This could be optimized further if necessary */
/* Unaligned prefix: byte copies until aligned */
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1929 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remainder: 4-, 2-, then 1-byte copies */
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 * Under AOT the TLS offset is loaded as an AOT constant (OP_TLS_SET_REG);
 * otherwise the offset is resolved now and baked into OP_TLS_SET.
 */
1967 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1971 if (cfg->compile_aot) {
1972 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1973 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1975 ins->sreg2 = c->dreg;
1976 MONO_ADD_INS (cfg->cbb, ins);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1980 ins->inst_offset = mini_get_tls_offset (tls_key);
1981 MONO_ADD_INS (cfg->cbb, ins);
1988 * Emit IR to push the current LMF onto the LMF stack.
1991 emit_push_lmf (MonoCompile *cfg)
1994 * Emit IR to push the LMF:
1995 * lmf_addr = <lmf_addr from tls>
1996 * lmf->lmf_addr = lmf_addr
1997 * lmf->prev_lmf = *lmf_addr
2000 int lmf_reg, prev_lmf_reg;
2001 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF lives directly in TLS */
2006 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2007 /* Load current lmf */
2008 lmf_ins = mono_get_lmf_intrinsic (cfg);
2010 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2012 lmf_reg = ins->dreg;
2013 /* Save previous_lmf */
2014 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2016 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2019 * Store lmf_addr in a variable, so it can be allocated to a global register.
2021 if (!cfg->lmf_addr_var)
2022 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr via jit_tls intrinsic, lmf_addr intrinsic, inlined
 * pthread_getspecific, or the mono_get_lmf_addr icall, in that order of
 * preference depending on platform support */
2025 ins = mono_get_jit_tls_intrinsic (cfg);
2027 int jit_tls_dreg = ins->dreg;
2029 MONO_ADD_INS (cfg->cbb, ins);
2030 lmf_reg = alloc_preg (cfg);
2031 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2033 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2036 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2038 MONO_ADD_INS (cfg->cbb, lmf_ins);
2041 MonoInst *args [16], *jit_tls_ins, *ins;
2043 /* Inline mono_get_lmf_addr () */
2044 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2046 /* Load mono_jit_tls_id */
2047 if (cfg->compile_aot)
2048 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2050 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2051 /* call pthread_getspecific () */
2052 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2053 /* lmf_addr = &jit_tls->lmf */
2054 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2057 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2061 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2063 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2064 lmf_reg = ins->dreg;
2066 prev_lmf_reg = alloc_preg (cfg);
2067 /* Save previous_lmf */
2068 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2069 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link the new LMF at the head of the list: *lmf_addr = lmf */
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2078 * Emit IR to pop the current LMF from the LMF stack.
2081 emit_pop_lmf (MonoCompile *cfg)
2083 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2089 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2090 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf straight into the TLS LMF slot */
2092 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2093 /* Load previous_lmf */
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2100 * Emit IR to pop the LMF:
2101 * *(lmf->lmf_addr) = lmf->prev_lmf
2103 /* This could be called before emit_push_lmf () */
2104 if (!cfg->lmf_addr_var)
2105 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2106 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2108 prev_lmf_reg = alloc_preg (cfg);
2109 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2110 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 *   Emit a profiler enter/leave icall to FUNC with the current method as
 * argument, if enter/leave profiling is enabled. Skipped for inlined
 * methods to avoid distorting the profile.
 */
2115 emit_instrumentation_call (MonoCompile *cfg, void *func)
2117 MonoInst *iargs [1];
2120 * Avoid instrumenting inlined methods since it can
2121 * distort profiling results.
2123 if (cfg->method != cfg->current_method)
2126 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2127 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2128 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Pick the call opcode variant for a call returning TYPE: _REG when
 * CALLI, _MEMBASE when VIRT, plain otherwise; the family (VOID/CALL/
 * LCALL/RCALL/FCALL/VCALL) is determined by the (underlying) return type.
 * Enums and generic instances are reduced and re-dispatched.
 */
2133 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2136 type = mini_get_underlying_type (type);
2137 switch (type->type) {
2138 case MONO_TYPE_VOID:
2139 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2146 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2150 case MONO_TYPE_FNPTR:
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_CLASS:
2153 case MONO_TYPE_STRING:
2154 case MONO_TYPE_OBJECT:
2155 case MONO_TYPE_SZARRAY:
2156 case MONO_TYPE_ARRAY:
2157 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2163 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2165 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2168 case MONO_TYPE_VALUETYPE:
2169 if (type->data.klass->enumtype) {
/* Enum: reduce to its base type and re-dispatch */
2170 type = mono_class_enum_basetype (type->data.klass);
2173 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2174 case MONO_TYPE_TYPEDBYREF:
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_GENERICINST:
2177 type = &type->data.generic_class->container_class->byval_arg;
2180 case MONO_TYPE_MVAR:
/* gsharedvt: type variables become vtype calls */
2182 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2190 * target_type_is_incompatible:
2191 * @cfg: MonoCompile context
2193 * Check that the item @arg on the evaluation stack can be stored
2194 * in the target type (can be a local, or field, etc).
2195 * The cfg arg can be used to check if we need verification or just
2198 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *
 *   Return nonzero when the stack item ARG cannot be stored into TARGET
 * (see the comment block above). Byref targets accept STACK_MP/STACK_PTR;
 * other targets are checked against the stack type expected for their
 * (underlying) MonoType, with klass equality checks for value types.
 */
2201 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2203 MonoType *simple_type;
2206 if (target->byref) {
2207 /* FIXME: check that the pointed to types match */
2208 if (arg->type == STACK_MP) {
2209 MonoClass *base_class = mono_class_from_mono_type (target);
2210 /* This is needed to handle gshared types + ldaddr */
2211 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2212 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2214 if (arg->type == STACK_PTR)
2219 simple_type = mini_get_underlying_type (target);
2220 switch (simple_type->type) {
2221 case MONO_TYPE_VOID:
2229 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2233 /* STACK_MP is needed when setting pinned locals */
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2239 case MONO_TYPE_FNPTR:
2241 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2242 * in native int. (#688008).
2244 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_CLASS:
2248 case MONO_TYPE_STRING:
2249 case MONO_TYPE_OBJECT:
2250 case MONO_TYPE_SZARRAY:
2251 case MONO_TYPE_ARRAY:
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
2258 if (arg->type != STACK_I8)
2262 if (arg->type != cfg->r4_stack_type)
2266 if (arg->type != STACK_R8)
2269 case MONO_TYPE_VALUETYPE:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_TYPEDBYREF:
2277 if (arg->type != STACK_VTYPE)
2279 klass = mono_class_from_mono_type (simple_type);
2280 if (klass != arg->klass)
2283 case MONO_TYPE_GENERICINST:
2284 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2285 MonoClass *target_class;
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 target_class = mono_class_from_mono_type (target);
2290 /* The second cases is needed when doing partial sharing */
2291 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2295 if (arg->type != STACK_OBJ)
2297 /* FIXME: check type compatibility */
2301 case MONO_TYPE_MVAR:
2302 g_assert (cfg->gshared);
2303 if (mini_type_var_is_vt (simple_type)) {
2304 if (arg->type != STACK_VTYPE)
2307 if (arg->type != STACK_OBJ)
2312 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2318 * Prepare arguments for passing to a function call.
2319 * Return a non-zero value if the arguments can't be passed to the given
2321 * The type checks are not yet complete and some conversions may need
2322 * casts on 32 or 64 bit architectures.
2324 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *
 *   Return nonzero when the items in ARGS cannot be passed to a call with
 * signature SIG (see the comment block above): each argument's stack type
 * is checked against the expected (underlying) parameter type, with byref
 * parameters accepting STACK_MP/STACK_PTR.
 */
2327 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2329 MonoType *simple_type;
/* 'this' argument must be an object, managed pointer, or native pointer */
2333 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2337 for (i = 0; i < sig->param_count; ++i) {
2338 if (sig->params [i]->byref) {
2339 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2343 simple_type = mini_get_underlying_type (sig->params [i]);
2345 switch (simple_type->type) {
2346 case MONO_TYPE_VOID:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2361 case MONO_TYPE_FNPTR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_STRING:
2367 case MONO_TYPE_OBJECT:
2368 case MONO_TYPE_SZARRAY:
2369 case MONO_TYPE_ARRAY:
2370 if (args [i]->type != STACK_OBJ)
2375 if (args [i]->type != STACK_I8)
2379 if (args [i]->type != cfg->r4_stack_type)
2383 if (args [i]->type != STACK_R8)
2386 case MONO_TYPE_VALUETYPE:
2387 if (simple_type->data.klass->enumtype) {
/* Enum: reduce to its base type and re-check */
2388 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2391 if (args [i]->type != STACK_VTYPE)
2394 case MONO_TYPE_TYPEDBYREF:
2395 if (args [i]->type != STACK_VTYPE)
2398 case MONO_TYPE_GENERICINST:
2399 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2402 case MONO_TYPE_MVAR:
2404 if (args [i]->type != STACK_VTYPE)
2408 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Unknown opcodes assert.
 */
2416 callvirt_to_call (int opcode)
2419 case OP_CALL_MEMBASE:
2421 case OP_VOIDCALL_MEMBASE:
2423 case OP_FCALL_MEMBASE:
2425 case OP_RCALL_MEMBASE:
2427 case OP_VCALL_MEMBASE:
2429 case OP_LCALL_MEMBASE:
2432 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 *   Map an *_MEMBASE (virtual) call opcode to its indirect (_REG)
 * counterpart. Unknown opcodes assert.
 */
2439 callvirt_to_call_reg (int opcode)
2442 case OP_CALL_MEMBASE:
2444 case OP_VOIDCALL_MEMBASE:
2445 return OP_VOIDCALL_REG;
2446 case OP_FCALL_MEMBASE:
2447 return OP_FCALL_REG;
2448 case OP_RCALL_MEMBASE:
2449 return OP_RCALL_REG;
2450 case OP_VCALL_MEMBASE:
2451 return OP_VCALL_REG;
2452 case OP_LCALL_MEMBASE:
2453 return OP_LCALL_REG;
2455 g_assert_not_reached ();
2461 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed: either the
 * given IMT_ARG value or a runtime constant for METHOD. Under LLVM the
 * register is recorded in call->imt_arg_reg; otherwise it is bound to
 * MONO_ARCH_IMT_REG as an out-argument.
 */
2463 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2467 if (COMPILE_LLVM (cfg)) {
2469 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2472 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2473 method_reg = ins->dreg;
2477 call->imt_arg_reg = method_reg;
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value, always via the IMT register */
2484 method_reg = alloc_preg (cfg);
2485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2487 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2488 method_reg = ins->dreg;
2491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2494 static MonoJumpInfo *
2495 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2497 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2501 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Return the generic-context usage flags for KLASS.
 *   NOTE(review): the cfg parameter is unused in the lines visible here;
 *   presumably an elided guard returns 0 when !cfg->gshared — confirm
 *   against the full file.
 */
2507 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2510 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Return the generic-context usage flags for METHOD.
 *   NOTE(review): cfg is unused in the visible lines; an elided
 *   !cfg->gshared fast path likely returns 0 — confirm.
 */
2516 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2519 return mono_method_check_context_used (method);
2525 * check_method_sharing:
2527 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Sets *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX (when the out pointers are
 * non-NULL) according to whether the callee needs its class vtable or a
 * method rgctx as a hidden argument under generic sharing.
 * NOTE(review): the `sharable = TRUE` / `pass_vtable = TRUE` /
 * `pass_mrgctx = TRUE` assignment lines are elided in this view; the
 * conditions below only make sense with them present — confirm.
 */
2530 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2532 gboolean pass_vtable = FALSE;
2533 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2535 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2536 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2537 gboolean sharable = FALSE;
2539 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) need an mrgctx instead. */
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2560 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Write results only through the out params the caller supplied. */
2565 if (out_pass_vtable)
2566 *out_pass_vtable = pass_vtable;
2567 if (out_pass_mrgctx)
2568 *out_pass_mrgctx = pass_mrgctx;
2571 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *   Build (but do not add to the bblock) a MonoCallInst for SIG/ARGS.
 *   CALLI selects an indirect call, VIRTUAL_ a vtable call, TAIL a tail
 *   call; RGCTX and UNBOX_TRAMPOLINE record extra-argument needs. The
 *   caller is responsible for MONO_ADD_INS'ing the returned call.
 */
2572 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2573 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2577 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the frame, so emit the leave-method profiler hook first. */
2585 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2587 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2589 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2592 call->signature = sig;
2593 call->rgctx_reg = rgctx;
2594 sig_ret = mini_get_underlying_type (sig->ret);
2596 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/*
 * NOTE(review): the two branches below test the same condition
 * (mini_type_is_vtype); the first is presumably guarded by an elided
 * `if (tail)` — a tail call returns through the caller's vret_addr,
 * while a normal vtype-returning call gets a fresh temp. Confirm.
 */
2599 if (mini_type_is_vtype (sig_ret)) {
2600 call->vret_var = cfg->vret_addr;
2601 //g_assert_not_reached ();
2603 } else if (mini_type_is_vtype (sig_ret)) {
2604 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2607 temp->backend.is_pinvoke = sig->pinvoke;
2610 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2611 * address of return value to increase optimization opportunities.
2612 * Before vtype decomposition, the dreg of the call ins itself represents the
2613 * fact the call modifies the return value. After decomposition, the call will
2614 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2615 * will be transformed into an LDADDR.
2617 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2618 loada->dreg = alloc_preg (cfg);
2619 loada->inst_p0 = temp;
2620 /* We reference the call too since call->dreg could change during optimization */
2621 loada->inst_p1 = call;
2622 MONO_ADD_INS (cfg->cbb, loada);
2624 call->inst.dreg = temp->dreg;
2626 call->vret_var = loada;
2627 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2628 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2630 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2631 if (COMPILE_SOFT_FLOAT (cfg)) {
2633 * If the call has a float argument, we would need to do an r8->r4 conversion using
2634 * an icall, but that cannot be done during the call sequence since it would clobber
2635 * the call registers + the stack. So we do it before emitting the call.
2637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2639 MonoInst *in = call->args [i];
/* The implicit `this` (i < hasthis) is pointer-typed, not from params []. */
2641 if (i >= sig->hasthis)
2642 t = sig->params [i - sig->hasthis];
2644 t = &mono_defaults.int_class->byval_arg;
2645 t = mono_type_get_underlying_type (t);
2647 if (!t->byref && t->type == MONO_TYPE_R4) {
2648 MonoInst *iargs [1];
2652 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2654 /* The result will be in an int vreg */
2655 call->args [i] = conv;
2661 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or native arch) lower the out-args. */
2664 if (COMPILE_LLVM (cfg))
2665 mono_llvm_emit_call (cfg, call);
2667 mono_arch_emit_call (cfg, call);
2669 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area and mark the method as making calls. */
2672 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2673 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx value in RGCTX_REG to CALL as the hidden rgctx
 *   argument, passed in the arch-specific MONO_ARCH_RGCTX_REG, and mark
 *   the cfg/call as using it.
 */
2679 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2682 cfg->uses_rgctx_reg = TRUE;
2683 call->rgctx_reg = TRUE;
2685 call->rgctx_arg_reg = rgctx_reg;
2689 inline static MonoInst*
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and arguments
 *   ARGS, optionally passing IMT_ARG and/or RGCTX_ARG as hidden
 *   arguments. For pinvoke wrappers with callconv checking enabled,
 *   brackets the call with OP_GET_SP/OP_SET_SP to detect and repair
 *   callee stack imbalance, throwing ExecutionEngineException on
 *   mismatch. Returns the call instruction.
 */
2690 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2695 gboolean check_sp = FALSE;
/* Only pinvoke wrappers need the stack-pointer balance check. */
2697 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2698 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2700 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh preg so the call can consume it. */
2705 rgctx_reg = mono_alloc_preg (cfg);
2706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record SP before the call into a dedicated local. */
2710 if (!cfg->stack_inbalance_var)
2711 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2713 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 ins->dreg = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
/* calli = TRUE; the address to call comes from ADDR's vreg. */
2718 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2720 call->inst.sreg1 = addr->dreg;
2723 emit_imt_argument (cfg, call, NULL, imt_arg);
2725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Read SP again after the call and compare with the saved value. */
2730 sp_reg = mono_alloc_preg (cfg);
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 MONO_ADD_INS (cfg->cbb, ins);
2736 /* Restore the stack so we don't crash when throwing the exception */
2737 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2738 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2739 MONO_ADD_INS (cfg->cbb, ins);
2741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2742 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2746 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2748 return (MonoInst*)call;
2752 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2755 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2757 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) call to METHOD with
 *   signature SIG and arguments ARGS. THIS_INS being non-NULL selects
 *   virtual dispatch; IMT_ARG/RGCTX_ARG are optional hidden arguments.
 *   Handles remoting wrappers, string ctors, delegate Invoke fast
 *   paths, devirtualization of non-virtual/final methods, and IMT vs.
 *   vtable-slot dispatch. Returns the call instruction.
 */
2760 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2761 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2763 #ifndef DISABLE_REMOTING
2764 gboolean might_be_remote = FALSE;
2766 gboolean virtual_ = this_ins != NULL;
2767 gboolean enable_for_aot = TRUE;
2770 MonoInst *call_target = NULL;
2772 gboolean need_unbox_trampoline;
2775 sig = mono_method_signature (method);
/* llvm-only interface calls must go through emit_llvmonly_virtual_call below. */
2777 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2778 g_assert_not_reached ();
2781 rgctx_reg = mono_alloc_preg (cfg);
2782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2785 if (method->string_ctor) {
2786 /* Create the real signature */
2787 /* FIXME: Cache these */
/* String ctors actually return the new string, not void. */
2788 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2789 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2794 context_used = mini_method_check_context_used (cfg, method);
2796 #ifndef DISABLE_REMOTING
/* MarshalByRef receivers may be transparent proxies and need wrappers. */
2797 might_be_remote = this_ins && sig->hasthis &&
2798 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2799 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2801 if (might_be_remote && context_used) {
2804 g_assert (cfg->gshared);
/* Under gsharing the wrapper address comes from the rgctx. */
2806 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2808 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2812 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2813 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* Object/interface methods on valuetypes need the unbox trampoline. */
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this_ins;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this_ins->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl. */
2835 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final and not a remoting wrapper. */
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2894 } else if (call_target) {
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2898 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2899 call->inst.sreg1 = call_target->dreg;
/*
 * BUG(review): `&= !MONO_INST_HAS_METHOD` uses logical not — !flag is 0,
 * so this clears ALL instruction flags, not just MONO_INST_HAS_METHOD.
 * Almost certainly should be the bitwise complement: `&= ~MONO_INST_HAS_METHOD`.
 */
2900 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* Generic virtual/interface dispatch through the vtable or IMT. */
2902 vtable_reg = alloc_preg (cfg);
2903 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2904 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 guint32 imt_slot = mono_method_get_imt_slot (method);
2906 emit_imt_argument (cfg, call, call->method, imt_arg);
2907 slot_reg = vtable_reg;
/* IMT table sits at negative offsets before the vtable. */
2908 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2910 slot_reg = vtable_reg;
2911 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2912 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2914 g_assert (mono_method_signature (method)->generic_param_count);
2915 emit_imt_argument (cfg, call, call->method, imt_arg);
2919 call->inst.sreg1 = slot_reg;
2920 call->inst.inst_offset = offset;
2921 call->is_virtual = TRUE;
2925 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2928 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2930 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: call METHOD with its own signature, no tail
 *   call, no IMT/rgctx hidden arguments.
 */
2934 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2936 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 *   No virtual dispatch, tail call, or hidden arguments.
 */
2940 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2947 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2950 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2952 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the registered JIT icall whose C function is FUNC,
 *   going through its exception-handling wrapper.
 */
2956 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2958 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2962 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2966 * mono_emit_abs_call:
2968 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2970 inline static MonoInst*
2971 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2972 MonoMethodSignature *sig, MonoInst **args)
2974 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2978 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji so the ABS patch resolver can look it up by pointer. */
2981 if (cfg->abs_patches == NULL)
2982 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2983 g_hash_table_insert (cfg->abs_patches, ji, ji)
2984 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the address as a patch, not a real function pointer. */
2985 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2989 static MonoMethodSignature*
2990 sig_to_rgctx_sig (MonoMethodSignature *sig)
2992 // FIXME: memory allocation
2993 MonoMethodSignature *res;
2996 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2997 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2998 res->param_count = sig->param_count + 1;
2999 for (i = 0; i < sig->param_count; ++i)
3000 res->params [i] = sig->params [i];
3001 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3005 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Build an argument array = ORIG_ARGS plus the value in ARG_REG
 *   appended, widen FSIG with one extra pointer parameter, and emit an
 *   indirect call through CALL_TARGET. Uses a stack buffer for small
 *   argument counts, mempool otherwise.
 *   NOTE(review): the `args = args_buf;` assignment for the small case
 *   appears elided in this view — confirm.
 */
3007 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3009 MonoMethodSignature *csig;
3010 MonoInst *args_buf [16];
3012 int i, pindex, tmp_reg;
3014 /* Make a call with an rgctx/extra arg */
3015 if (fsig->param_count + 2 < 16)
3018 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* Copy `this` (if any) and the declared parameters, then append the extra arg. */
3021 args [pindex ++] = orig_args [0];
3022 for (i = 0; i < fsig->param_count; ++i)
3023 args [pindex ++] = orig_args [fsig->hasthis + i];
3024 tmp_reg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3026 csig = sig_to_rgctx_sig (fsig);
3027 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3030 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *   llvm-only mode uses <addr, arg> function descriptors: load the code
 *   address and the extra argument from the descriptor, then call the
 *   address passing the argument appended after the normal args.
 */
3032 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3034 int addr_reg, arg_reg;
3035 MonoInst *call_target;
3037 g_assert (cfg->llvm_only);
3040 * addr points to a <addr, arg> pair, load both of them, and
3041 * make a call to addr, passing arg as an extra arg.
3043 addr_reg = alloc_preg (cfg);
3044 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3045 arg_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3048 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without the wrapper needed
 *   for stack walks / exception raising).
 *   NOTE(review): the return statements for the conditions below are
 *   elided in this view; presumably both disqualifying conditions
 *   return FALSE and the fall-through returns TRUE — confirm.
 */
3052 direct_icalls_enabled (MonoCompile *cfg)
3054 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3056 if (cfg->compile_llvm)
3059 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the JIT icall INFO. When the icall cannot raise and
 *   direct icalls are allowed, inline its (lazily created) wrapper
 *   instead of calling through it; otherwise emit a normal wrapped call.
 */
3065 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3068 * Call the jit icall without a wrapper if possible.
3069 * The wrapper is needed for the following reasons:
3070 * - to handle exceptions thrown using mono_raise_exceptions () from the
3071 * icall function. The EH code needs the lmf frame pushed by the
3072 * wrapper to be able to unwind back to managed code.
3073 * - to be able to do stack walks for asynchronously suspended
3074 * threads when debugging.
3076 if (info->no_raise && direct_icalls_enabled (cfg)) {
3080 if (!info->wrapper_method) {
/* Lazily create and cache the wrapper; barrier publishes it to other threads. */
3081 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3082 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3084 mono_memory_barrier ();
3088 * Inline the wrapper method, which is basically a call to the C icall, and
3089 * an exception check.
3091 costs = inline_method (cfg, info->wrapper_method, NULL,
3092 args, NULL, cfg->real_offset, TRUE);
3093 g_assert (costs > 0);
3094 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   After a pinvoke (or under LLVM), native code may return sub-register
 *   integers with garbage upper bits; emit the appropriate sign/zero
 *   extension on INS's result and return the widened instruction.
 *   NOTE(review): the `break;` lines of the switch are elided in this
 *   view — each case assigns its widen_op and breaks; confirm.
 */
3103 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3105 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3106 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3110 * Native code might return non register sized integers
3111 * without initializing the upper bits.
3113 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3114 case OP_LOADI1_MEMBASE:
3115 widen_op = OP_ICONV_TO_I1;
3117 case OP_LOADU1_MEMBASE:
3118 widen_op = OP_ICONV_TO_U1;
3120 case OP_LOADI2_MEMBASE:
3121 widen_op = OP_ICONV_TO_I2;
3123 case OP_LOADU2_MEMBASE:
3124 widen_op = OP_ICONV_TO_U2;
3130 if (widen_op != -1) {
3131 int dreg = alloc_preg (cfg);
3134 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3135 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily look up and cache corlib's String.memcpy(dst, src, n) helper;
 *   aborts if running against an old corlib that lacks it.
 */
3145 get_memcpy_method (void)
3147 static MonoMethod *memcpy_method = NULL;
3148 if (!memcpy_method) {
3149 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively walk KLASS's instance fields and set a bit in
 *   *WB_BITMAP for every pointer-sized slot (at byte OFFSET from the
 *   start) that holds a reference and therefore needs a write barrier.
 *   Static fields are skipped; embedded valuetypes with references
 *   recurse with an adjusted offset.
 */
3157 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3159 MonoClassField *field;
3160 gpointer iter = NULL;
3162 while ((field = mono_class_get_fields (klass, &iter))) {
3165 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
3167 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3168 if (mini_type_is_reference (mono_field_get_type (field))) {
3169 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3170 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3172 MonoClass *field_class = mono_class_from_mono_type (field->type);
3173 if (field_class->has_references)
3174 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR. Prefers,
 *   in order: the arch's inline card-table barrier opcode, an inline
 *   card-table mark sequence, and finally a call to the runtime's write
 *   barrier method. No-op when write barriers are disabled.
 */
3180 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3182 int card_table_shift_bits;
3183 gpointer card_table_mask;
3185 MonoInst *dummy_use;
3186 int nursery_shift_bits;
3187 size_t nursery_size;
3189 if (!cfg->gen_write_barriers)
3192 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3194 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Arch provides a fused card-table barrier opcode. */
3196 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3199 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3200 wbarrier->sreg1 = ptr->dreg;
3201 wbarrier->sreg2 = value->dreg;
3202 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = table[(ptr >> shift) & mask] = 1. */
3203 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3204 int offset_reg = alloc_preg (cfg);
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3209 if (card_table_mask)
3210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3212 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3213 * IMM's larger than 32bits.
3215 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3216 card_reg = ins->dreg;
3218 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3219 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the managed write barrier. */
3221 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3222 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the GC's sake. */
3225 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of
 *   KLASS (SIZE bytes, ALIGN alignment) from iargs[1] to iargs[0].
 *   Returns whether inlining succeeded; on failure the caller falls back
 *   to an out-of-line copy. Pointer-sized slots flagged in the wb bitmap
 *   get a write barrier after each store.
 *   NOTE(review): several early `return FALSE/TRUE` lines and loop/branch
 *   delimiters are elided in this view — confirm control flow against the
 *   full file before modifying.
 */
3229 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3231 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3232 unsigned need_wb = 0;
3237 /*types with references can't have alignment smaller than sizeof(void*) */
3238 if (align < SIZEOF_VOID_P)
3241 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3242 if (size > 32 * SIZEOF_VOID_P)
3245 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3247 /* We don't unroll more than 5 stores to avoid code bloat. */
3248 if (size > 5 * SIZEOF_VOID_P) {
3249 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3250 size += (SIZEOF_VOID_P - 1);
3251 size &= ~(SIZEOF_VOID_P - 1);
/* Large copy: defer to the runtime's bitmap-driven barrier copy icall. */
3253 EMIT_NEW_ICONST (cfg, iargs [2], size);
3254 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3255 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3259 destreg = iargs [0]->dreg;
3260 srcreg = iargs [1]->dreg;
3263 dest_ptr_reg = alloc_preg (cfg);
3264 tmp_reg = alloc_preg (cfg);
/* Walk a moving destination pointer so the barrier sees each slot address. */
3267 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3269 while (size >= SIZEOF_VOID_P) {
3270 MonoInst *load_inst;
3271 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3272 load_inst->dreg = tmp_reg;
3273 load_inst->inst_basereg = srcreg;
3274 load_inst->inst_offset = offset;
3275 MONO_ADD_INS (cfg->cbb, load_inst);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots marked in the bitmap. */
3280 emit_write_barrier (cfg, iargs [0], load_inst);
3282 offset += SIZEOF_VOID_P;
3283 size -= SIZEOF_VOID_P;
3286 /*tmp += sizeof (void*)*/
3287 if (size >= SIZEOF_VOID_P) {
3288 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3289 MONO_ADD_INS (cfg->cbb, iargs [0]);
3293 /* Those cannot be references since size < sizeof (void*) */
3295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3309 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3319 * Emit code to copy a valuetype of type @klass whose address is stored in
3320 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects the native (marshalled) layout/size; gsharedvt classes
 * get their size and memcpy helper from the rgctx at runtime. Stores to
 * heap objects with reference fields go through barrier-aware paths.
 */
3323 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3325 MonoInst *iargs [4];
3328 MonoMethod *memcpy_method;
3329 MonoInst *size_ins = NULL;
3330 MonoInst *memcpy_ins = NULL;
3334 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3337 * This check breaks with spilled vars... need to handle it during verification anyway.
3338 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are runtime values fetched from the rgctx. */
3341 if (mini_is_gsharedvt_klass (klass)) {
3343 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3344 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3348 n = mono_class_native_size (klass, &align);
3350 n = mono_class_value_size (klass, &align);
3352 /* if native is true there should be no references in the struct */
3353 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3354 /* Avoid barriers when storing to the stack */
3355 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3356 (dest->opcode == OP_LDADDR))) {
3362 context_used = mini_class_check_context_used (cfg, klass);
3364 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3365 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3367 } else if (context_used) {
3368 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3370 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3371 if (!cfg->compile_aot)
3372 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware out-of-line copy via the runtime. */
3376 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3378 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small fixed sizes, helper otherwise. */
3383 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3384 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3385 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3390 iargs [2] = size_ins;
3392 EMIT_NEW_ICONST (cfg, iargs [2], n);
3394 memcpy_method = get_memcpy_method ();
3396 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3398 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily look up and cache corlib's String.memset(ptr, val, n) helper;
 *   aborts if running against an old corlib that lacks it.
 */
3403 get_memset_method (void)
3405 static MonoMethod *memset_method = NULL;
3406 if (!memset_method) {
3407 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3409 g_error ("Old corlib found. Install a new one");
3411 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of KLASS at the address in
 *   DEST->dreg. gsharedvt classes call a runtime bzero helper with the
 *   runtime-provided size; otherwise small types are memset inline and
 *   larger ones call corlib's memset.
 */
3415 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3417 MonoInst *iargs [3];
3420 MonoMethod *memset_method;
3421 MonoInst *size_ins = NULL;
3422 MonoInst *bzero_ins = NULL;
3423 static MonoMethod *bzero_method;
3425 /* FIXME: Optimize this for the case when dest is an LDADDR */
3426 mono_class_init (klass);
/* gsharedvt: size and bzero target are runtime values from the rgctx. */
3427 if (mini_is_gsharedvt_klass (klass)) {
3428 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3429 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3431 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3432 g_assert (bzero_method);
3434 iargs [1] = size_ins;
3435 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3439 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3441 n = mono_class_value_size (klass, &align);
/* Small types: inline memset; beyond 8 pointer-sizes, call the helper. */
3443 if (n <= sizeof (gpointer) * 8) {
3444 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3447 memset_method = get_memset_method ();
3449 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3450 EMIT_NEW_ICONST (cfg, iargs [2], n);
3451 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3458 * Emit IR to return either the this pointer for instance method,
3459 * or the mrgctx for static methods.
/*
 * The rgctx source depends on METHOD's kind: instance methods of
 * reference types use `this` (the vtable is loaded from the object);
 * generic methods use the mrgctx variable; static/valuetype methods use
 * the vtable variable (possibly loading class_vtable out of an mrgctx).
 */
3462 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3464 MonoInst *this_ins = NULL;
3466 g_assert (cfg->gshared);
3468 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3469 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3470 !method->klass->valuetype)
3471 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* Generic method: rgctx is the mrgctx stored in the vtable var slot. */
3473 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3474 MonoInst *mrgctx_loc, *mrgctx_var;
3476 g_assert (!this_ins);
3477 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3479 mrgctx_loc = mono_get_vtable_var (cfg);
3480 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3483 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3484 MonoInst *vtable_loc, *vtable_var;
3486 g_assert (!this_ins);
3488 vtable_loc = mono_get_vtable_var (cfg);
3489 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The var actually holds an mrgctx: load its class vtable field. */
3491 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3492 MonoInst *mrgctx_var = vtable_var;
3495 vtable_reg = alloc_preg (cfg);
3496 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3497 vtable_var->type = STACK_PTR;
/* Instance method: the vtable is read from `this`. */
3505 vtable_reg = alloc_preg (cfg);
3506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3511 static MonoJumpInfoRgctxEntry *
3512 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3514 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3515 res->method = method;
3516 res->in_mrgctx = in_mrgctx;
3517 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3518 res->data->type = patch_type;
3519 res->data->data.target = patch_data;
3520 res->info_type = info_type;
3525 static inline MonoInst*
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fastpath that walks the rgctx slot array chain for
 *   ENTRY starting from RGCTX: follow `depth` indirections, load the
 *   slot, and fall back to the mono_fill_{method,class}_rgctx icalls when
 *   any pointer or the slot itself is NULL. The llvm-only path (top)
 *   always calls the icall since the slot index is not a compile-time
 *   constant there.
 */
3526 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3528 MonoInst *args [16];
3531 // FIXME: No fastpath since the slot is not a compile time constant
3533 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3534 if (entry->in_mrgctx)
3535 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3537 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3541 * FIXME: This can be called during decompose, which is a problem since it creates
3543 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3545 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3547 MonoBasicBlock *is_null_bb, *end_bb;
3548 MonoInst *res, *ins, *call;
3551 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the encoded slot into (is-mrgctx, index) and find its depth. */
3553 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3554 index = MONO_RGCTX_SLOT_INDEX (slot);
3556 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3557 for (depth = 0; ; ++depth) {
3558 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3560 if (index < size - 1)
3565 NEW_BBLOCK (cfg, end_bb);
3566 NEW_BBLOCK (cfg, is_null_bb);
3569 rgctx_reg = rgctx->dreg;
/* Class rgctx: must first load it from the vtable, NULL-checking it. */
3571 rgctx_reg = alloc_preg (cfg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3574 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3575 NEW_BBLOCK (cfg, is_null_bb);
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3581 for (i = 0; i < depth; ++i) {
3582 int array_reg = alloc_preg (cfg);
3584 /* load ptr to next array */
3585 if (mrgctx && i == 0)
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT)
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3589 rgctx_reg = array_reg;
3590 /* is the ptr null? */
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3592 /* if yes, jump to actual trampoline */
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the target slot (slot 0 of each array is the link pointer). */
3597 val_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3599 /* is the slot null? */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3601 /* if yes, jump to actual trampoline */
3602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath hit: move the slot value into the result register. */
3605 res_reg = alloc_preg (cfg);
3606 MONO_INST_NEW (cfg, ins, OP_MOVE);
3607 ins->dreg = res_reg;
3608 ins->sreg1 = val_reg;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: fill the slot via the runtime and move the result over. */
3614 MONO_START_BB (cfg, is_null_bb);
3616 EMIT_NEW_ICONST (cfg, args [1], index);
3618 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3620 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3621 MONO_INST_NEW (cfg, ins, OP_MOVE);
3622 ins->dreg = res_reg;
3623 ins->sreg1 = call->dreg;
3624 MONO_ADD_INS (cfg->cbb, ins);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3627 MONO_START_BB (cfg, end_bb);
3636 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Either inlines the fetch (emit_rgctx_fetch_inline) or emits an abs call
 * through the rgctx lazy fetch trampoline. NOTE(review): the condition
 * selecting between the two paths is not visible in this extraction —
 * confirm against the full source.
 */
3639 static inline MonoInst*
3640 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3643 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3645 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE describing KLASS from
 * the runtime generic context of the current method.
 */
3649 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3650 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for the signature SIG
 * from the runtime generic context of the current method.
 */
3659 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3660 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3662 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3663 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load rgctx data of type RGCTX_TYPE describing a gsharedvt call
 * to CMETHOD with signature SIG. The (sig, method) pair is packed into a
 * mempool-allocated MonoJumpInfoGSharedVtCall used as the patch-info payload.
 */
3669 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3670 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3672 MonoJumpInfoGSharedVtCall *call_info;
3673 MonoJumpInfoRgctxEntry *entry;
3676 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3677 call_info->sig = sig;
3678 call_info->method = cmethod;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 * emit_get_rgctx_virt_method:
3689 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Packs (klass, virt_method) into a mempool-allocated MonoJumpInfoVirtMethod
 * and emits an rgctx fetch for it.
 */
3692 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3693 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3695 MonoJumpInfoVirtMethod *info;
3696 MonoJumpInfoRgctxEntry *entry;
3699 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3700 info->klass = klass;
3701 info->method = virt_method;
3703 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3704 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for
 * CMETHOD, described by the caller-provided INFO, from the rgctx.
 */
3710 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3711 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3713 MonoJumpInfoRgctxEntry *entry;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
3723 * emit_get_rgctx_method:
3725 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3726 * normal constants, else emit a load from the rgctx.
3729 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3730 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at compile time, emit a constant. */
3732 if (!context_used) {
3735 switch (rgctx_type) {
3736 case MONO_RGCTX_INFO_METHOD:
3737 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3739 case MONO_RGCTX_INFO_METHOD_RGCTX:
3740 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not valid in the non-shared case. */
3743 g_assert_not_reached ();
/* Shared case: load the data from the runtime generic context. */
3746 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3747 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE describing FIELD from
 * the runtime generic context of the current method.
 */
3754 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3755 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3757 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3758 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3760 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot in cfg->gsharedvt_info->entries holding the
 * (rgctx_type, data) template, reusing an existing matching entry when
 * possible and appending a new one otherwise. The entries array is grown
 * geometrically (doubling, starting at 16) from cfg->mempool.
 */
3764 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3766 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3767 MonoRuntimeGenericContextInfoTemplate *template_;
/* Linear search for an existing entry; LOCAL_OFFSET entries are never shared. */
3772 for (i = 0; i < info->num_entries; ++i) {
3773 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3775 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array when full. Mempool memory is never freed, so the
 * old array is simply abandoned after the copy. */
3779 if (info->num_entries == info->count_entries) {
3780 MonoRuntimeGenericContextInfoTemplate *new_entries;
3781 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3783 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3785 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3786 info->entries = new_entries;
3787 info->count_entries = new_count_entries;
/* Append the new template at the end. */
3790 idx = info->num_entries;
3791 template_ = &info->entries [idx];
3792 template_->info_type = rgctx_type;
3793 template_->data = data;
3795 info->num_entries ++;
3801 * emit_get_gsharedvt_info:
3803 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3806 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) a slot for (data, rgctx_type) in the per-method info. */
3811 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3812 /* Load info->entries [idx] */
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3820 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3822 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3826 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / type initializer of KLASS if it has not
 * run yet. The vtable is obtained either from the rgctx (shared code) or as a
 * constant; the check itself is either a single OP_GENERIC_CLASS_INIT opcode
 * (when the backend supports it) or an explicit inline test of the vtable's
 * 'initialized' bit with an icall fallback.
 */
3829 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3831 MonoInst *vtable_arg;
3834 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must come from the rgctx. */
3837 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3838 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable can be emitted as a constant. */
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3844 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3847 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3851 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3852 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3854 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3855 ins->sreg1 = vtable_arg->dreg;
3856 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline "is the vtable initialized?" bit test + icall slowpath. */
3858 static int byte_offset = -1;
3859 static guint8 bitmask;
3860 int bits_reg, inited_reg;
3861 MonoBasicBlock *inited_bb;
3862 MonoInst *args [16];
/* Lazily locate the byte/bit of MonoVTable.initialized (it is a bitfield). */
3864 if (byte_offset < 0)
3865 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3867 bits_reg = alloc_ireg (cfg);
3868 inited_reg = alloc_ireg (cfg);
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3873 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized: skip the icall. */
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3878 args [0] = vtable_arg;
3879 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3881 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger/single-step marker) at native offset
 * IP - cfg->header->code, but only when sequence points are enabled and the
 * method being emitted is the outermost method (not an inlinee).
 */
3886 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3890 if (cfg->gen_seq_points && cfg->method == method) {
3891 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3893 ins->flags |= MONO_INST_NONEMPTY_STACK;
3894 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit IR which records the source class
 * (from OBJ_REG's vtable) and the target class KLASS into the JIT TLS area
 * (class_cast_from/class_cast_to), so a later InvalidCastException can report
 * both types. When NULL_CHECK is requested a null OBJ skips the recording.
 * A no-op unless mini_get_debug_options ()->better_cast_details is set.
 */
3899 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3901 if (mini_get_debug_options ()->better_cast_details) {
3902 int vtable_reg = alloc_preg (cfg);
3903 int klass_reg = alloc_preg (cfg);
3904 MonoBasicBlock *is_null_bb = NULL;
3906 int to_klass_reg, context_used;
/* Optional null guard: nothing to record for a null object. */
3909 NEW_BBLOCK (cfg, is_null_bb);
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Need the TLS intrinsic to reach MonoJitTlsData; bail out loudly if absent. */
3915 tls_get = mono_get_jit_tls_intrinsic (cfg);
3917 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3921 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's dynamic class as class_cast_from. */
3922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class; from the rgctx when the code is shared. */
3927 context_used = mini_class_check_context_used (cfg, klass);
3929 MonoInst *class_ins;
3931 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3932 to_klass_reg = class_ins->dreg;
3934 to_klass_reg = alloc_preg (cfg);
3935 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3940 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart to save_cast_details: clears the recorded cast information in
 * the JIT TLS area once the cast has succeeded. No-op unless
 * better_cast_details is enabled.
 */
3945 reset_cast_details (MonoCompile *cfg)
3947 /* Reset the variables holding the cast details */
3948 if (mini_get_debug_options ()->better_cast_details) {
3949 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3951 MONO_ADD_INS (cfg->cbb, tls_get);
3952 /* It is enough to reset the from field */
3953 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3958 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ is an array of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks). The comparison strategy depends on the compilation
 * mode: class pointer (shared/AOT-shared), rgctx vtable (generic sharing),
 * vtable constant (AOT) or raw vtable address (JIT).
 */
3961 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3963 int vtable_reg = alloc_preg (cfg);
3966 context_used = mini_class_check_context_used (cfg, array_class);
/* Record cast info for better diagnostics before the possible throw. */
3968 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the implicit null check on OBJ. */
3970 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 int class_reg = alloc_preg (cfg);
/* Compare MonoClass pointers via a runtime constant. */
3976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3977 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3978 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3979 } else if (context_used) {
3980 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the rgctx. */
3982 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3985 if (cfg->compile_aot) {
/* AOT: compare against a patched vtable constant. */
3989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3991 vt_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3993 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable address is a plain immediate. */
3996 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4002 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4004 reset_cast_details (cfg);
4008 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4009 * generic code is generated.
/*
 * Calls Nullable<T>.Unbox on VAL. In shared code the target address comes
 * from the rgctx (indirect call); otherwise a direct call is emitted,
 * optionally passing the vtable when method sharing requires it.
 */
4012 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4014 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4017 MonoInst *rgctx, *addr;
4019 /* FIXME: What if the class is shared? We might not
4020 have to get the address of the method from the
/* Shared case: indirect call through the rgctx-provided code address. */
4022 addr = emit_get_rgctx_method (cfg, context_used, method,
4023 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4024 if (cfg->llvm_only && cfg->gsharedvt) {
4025 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4027 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4029 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, passing the vtable if sharing needs it. */
4032 gboolean pass_vtable, pass_mrgctx;
4033 MonoInst *rgctx_arg = NULL;
4035 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4036 g_assert (!pass_mrgctx);
4039 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4042 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4045 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR implementing the 'unbox' CIL opcode for KLASS on the boxed object
 * at the top of the stack (sp [0]): verify the dynamic type (rank 0 and
 * matching element class, throwing InvalidCastException otherwise) and
 * return the address of the unboxed value (obj + sizeof (MonoObject)).
 */
4050 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4054 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4055 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4056 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4057 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4059 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check on the boxed object. */
4060 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4063 /* FIXME: generics */
4064 g_assert (klass->rank == 0);
/* A boxed value can never be an array: rank must be 0. */
4067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4068 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare element classes via an rgctx lookup. */
4073 MonoInst *element_class;
4076 /* This assertion is from the unboxcast insn */
4077 g_assert (klass->rank == 0);
4079 element_class = emit_get_rgctx_klass (cfg, context_used,
4080 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4082 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4083 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast diagnostics around it. */
4085 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4086 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4087 reset_cast_details (cfg);
/* Result: a managed pointer to the value, just past the object header. */
4090 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4091 MONO_ADD_INS (cfg->cbb, add);
4092 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete layout is only
 * known at runtime. Emits a three-way runtime dispatch on the class's box
 * type (vtype / reference / nullable) and loads the resulting value from a
 * common address register at the join point.
 */
4099 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4101 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4102 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4106 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check: mono_object_castclass_unbox throws on mismatch. */
4112 args [1] = klass_inst;
4115 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4117 NEW_BBLOCK (cfg, is_ref_bb);
4118 NEW_BBLOCK (cfg, is_nullable_bb);
4119 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of KLASS. */
4120 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4128 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the value lives right after the object header. */
4132 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4133 MONO_ADD_INS (cfg->cbb, addr);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_ref_bb);
4140 /* Save the ref to a temporary */
4141 dreg = alloc_ireg (cfg);
4142 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4143 addr->dreg = addr_reg;
4144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4148 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time. */
4151 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4152 MonoInst *unbox_call;
4153 MonoMethodSignature *unbox_sig;
4155 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4156 unbox_sig->ret = &klass->byval_arg;
4157 unbox_sig->param_count = 1;
4158 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4161 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4163 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4165 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4166 addr->dreg = addr_reg;
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Join point: load the value through the common address register. */
4172 MONO_START_BB (cfg, end_bb);
4175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4181 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of class KLASS (optionally for a
 * subsequent box when FOR_BOX). The allocation strategy depends on the mode:
 * shared generic code goes through the rgctx; managed allocators are used
 * when available; AOT out-of-line corlib allocations use a specialized
 * helper; otherwise a JIT icall selected by mono_class_get_allocation_ftn.
 */
4184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4186 MonoInst *iargs [2];
/* ---- Shared generic code path (context_used, presumably) ---- */
4191 MonoRgctxInfoType rgctx_info;
4192 MonoInst *iargs [2];
/* gsharedvt classes have no compile-time-known instance size. */
4193 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4195 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4197 if (cfg->opt & MONO_OPT_SHARED)
4198 rgctx_info = MONO_RGCTX_INFO_KLASS;
4200 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4201 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4203 if (cfg->opt & MONO_OPT_SHARED) {
4204 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4206 alloc_ftn = ves_icall_object_new;
4209 alloc_ftn = ves_icall_object_new_specific;
/* Managed (GC-specific) allocator: pass vtable + aligned size. */
4212 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4213 if (known_instance_size) {
4214 int size = mono_class_instance_size (klass);
4215 if (size < sizeof (MonoObject))
4216 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4218 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4220 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4223 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* ---- Non-shared path ---- */
4226 if (cfg->opt & MONO_OPT_SHARED) {
4227 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4228 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4230 alloc_ftn = ves_icall_object_new;
4231 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4232 /* This happens often in argument checking code, eg. throw new FooException... */
4233 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4234 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4235 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4237 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4238 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed -> surface a type load error on the cfg. */
4242 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4243 cfg->exception_ptr = klass;
4247 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4249 if (managed_alloc) {
4250 int size = mono_class_instance_size (klass);
4251 if (size < sizeof (MonoObject))
4252 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4254 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4255 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4256 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Generic icall; some allocators take the length in words first. */
4258 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4260 guint32 lw = vtable->klass->instance_size;
4261 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4262 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4263 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4266 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4270 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4274 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR implementing the 'box' CIL opcode: box VAL of type KLASS.
 * Nullable<T> is boxed by calling its Box method; gsharedvt types use a
 * runtime three-way dispatch on the box type (vtype / ref / nullable);
 * ordinary value types allocate and store the value past the object header.
 */
4277 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4279 MonoInst *alloc, *ins;
/* ---- Nullable<T>: delegate to Nullable<T>.Box. ---- */
4281 if (mono_class_is_nullable (klass)) {
4282 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4285 if (cfg->llvm_only && cfg->gsharedvt) {
4286 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4287 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4288 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4290 /* FIXME: What if the class is shared? We might not
4291 have to get the method address from the RGCTX. */
4292 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4293 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4294 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4296 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: direct call, optionally passing the vtable. */
4299 gboolean pass_vtable, pass_mrgctx;
4300 MonoInst *rgctx_arg = NULL;
4302 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4303 g_assert (!pass_mrgctx);
4306 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4309 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4312 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* ---- gsharedvt: box type only known at runtime. ---- */
4316 if (mini_is_gsharedvt_klass (klass)) {
4317 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4318 MonoInst *res, *is_ref, *src_var, *addr;
4321 dreg = alloc_ireg (cfg);
4323 NEW_BBLOCK (cfg, is_ref_bb);
4324 NEW_BBLOCK (cfg, is_nullable_bb);
4325 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type. */
4326 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and store the value after the object header. */
4334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4338 ins->opcode = OP_STOREV_MEMBASE;
4340 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4341 res->type = STACK_OBJ;
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4346 MONO_START_BB (cfg, is_ref_bb);
4348 /* val is a vtype, so has to load the value manually */
4349 src_var = get_vreg_to_inst (cfg, val->dreg);
4351 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4352 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4357 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box through a hand-built signature. */
4360 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4361 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4363 MonoMethodSignature *box_sig;
4366 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4367 * construct that method at JIT time, so have to do things by hand.
4369 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4370 box_sig->ret = &mono_defaults.object_class->byval_arg;
4371 box_sig->param_count = 1;
4372 box_sig->params [0] = &klass->byval_arg;
4375 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4377 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4378 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4379 res->type = STACK_OBJ;
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4385 MONO_START_BB (cfg, end_bb);
/* ---- Ordinary value type: allocate + store. ---- */
4389 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type parameter
 * instantiated with a reference type — the case where variance makes simple
 * cast checks insufficient.
 */
4399 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4402 MonoGenericContainer *container;
4403 MonoGenericInst *ginst;
4405 if (klass->generic_class) {
4406 container = klass->generic_class->container_class->generic_container;
4407 ginst = klass->generic_class->context.class_inst;
4408 } else if (klass->generic_container && context_used) {
4409 container = klass->generic_container;
4410 ginst = container->context.class_inst;
/* Look for a variant parameter whose argument is a reference type. */
4415 for (i = 0; i < container->type_argc; ++i) {
4417 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4419 type = ginst->type_argv [i];
4420 if (mini_type_is_reference (type))
4426 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) can be called directly instead of going
 * through a wrapper. Only a whitelist of corlib classes whose icalls never
 * raise managed exceptions is allowed. The whitelist hash table is built
 * lazily and published with a memory barrier; lookups afterwards are
 * lock-free (read-only table).
 */
4429 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4431 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4432 if (!direct_icalls_enabled (cfg))
4436 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4437 * Whitelist a few icalls for now.
4439 if (!direct_icall_type_hash) {
4440 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4442 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4443 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4444 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4445 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible. */
4446 mono_memory_barrier ();
4447 direct_icall_type_hash = h;
4450 if (cmethod->klass == mono_defaults.math_class)
4452 /* No locking needed */
4453 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4458 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshalling wrapper for KLASS,
 * recording cast details around the call for better diagnostics.
 * ARGS are the wrapper's arguments (obj, klass, cache).
 */
4461 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4463 MonoMethod *mono_castclass;
4466 mono_castclass = mono_marshal_get_castclass_with_cache ();
4468 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4469 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4470 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh index identifying a castclass call site, combining the
 * method index (high 16 bits) with a per-method counter (low 16 bits).
 */
4476 get_castclass_cache_idx (MonoCompile *cfg)
4478 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4479 cfg->castclass_cache_index ++;
4480 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: KLASS is a compile time constant, so the klass and
 * cache arguments are emitted as constants, then the shared
 * emit_castclass_with_cache path is used.
 */
4484 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4493 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* Per-call-site cache slot, patched in at runtime. */
4496 idx = get_castclass_cache_idx (cfg);
4497 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4499 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4500 return emit_castclass_with_cache (cfg, klass, args);
4504 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' CIL opcode: cast SRC to KLASS,
 * throwing InvalidCastException on failure. Strategy by case:
 *   - variant generic argument (non-shared): castclass-with-cache wrapper;
 *   - MarshalByRef or interface (non-shared): inline the castclass wrapper;
 *   - complex/shared cases: castclass-with-cache via the rgctx cache slot;
 *   - otherwise: inline vtable/class comparisons.
 * null always passes the cast.
 */
4507 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4509 MonoBasicBlock *is_null_bb;
4510 int obj_reg = src->dreg;
4511 int vtable_reg = alloc_preg (cfg);
4513 MonoInst *klass_inst = NULL, *res;
4515 context_used = mini_class_check_context_used (cfg, klass);
4517 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4518 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4519 (*inline_costs) += 2;
4521 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4522 MonoMethod *mono_castclass;
4523 MonoInst *iargs [1];
4526 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass wrapper at this call site. */
4529 save_cast_details (cfg, klass, src->dreg, TRUE);
4530 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4531 iargs, ip, cfg->real_offset, TRUE);
4532 reset_cast_details (cfg);
4533 CHECK_CFG_EXCEPTION;
4534 g_assert (costs > 0);
4536 cfg->real_offset += 5;
4538 (*inline_costs) += costs;
/* Complex or shared cast: go through the cache-backed wrapper, with the
 * cache entry fetched from the rgctx. */
4546 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4547 MonoInst *cache_ins;
4549 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4554 /* klass - it's the second element of the cache entry*/
4555 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4558 args [2] = cache_ins;
4560 return emit_castclass_with_cache (cfg, klass, args);
4563 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null passes the cast unconditionally. */
4566 NEW_BBLOCK (cfg, is_null_bb);
4568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4571 save_cast_details (cfg, klass, obj_reg, FALSE);
4573 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4577 int klass_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class in JIT mode: a single pointer compare suffices. */
4581 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4582 /* the remoting code is broken, access the class for now */
4583 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4584 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4587 cfg->exception_ptr = klass;
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4595 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full hierarchy walk in mini_emit_castclass_inst. */
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4598 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4602 MONO_START_BB (cfg, is_null_bb);
4604 reset_cast_details (cfg);
4613 * Returns NULL and set the cfg exception on error.
4616 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4619 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4620 int obj_reg = src->dreg;
4621 int vtable_reg = alloc_preg (cfg);
4622 int res_reg = alloc_ireg_ref (cfg);
4623 MonoInst *klass_inst = NULL;
4628 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4629 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4630 MonoInst *cache_ins;
4632 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4637 /* klass - it's the second element of the cache entry*/
4638 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins;
4643 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4646 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4649 NEW_BBLOCK (cfg, is_null_bb);
4650 NEW_BBLOCK (cfg, false_bb);
4651 NEW_BBLOCK (cfg, end_bb);
4653 /* Do the assignment at the beginning, so the other assignment can be if converted */
4654 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4655 ins->type = STACK_OBJ;
4658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4664 g_assert (!context_used);
4665 /* the is_null_bb target simply copies the input register to the output */
4666 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4668 int klass_reg = alloc_preg (cfg);
4671 int rank_reg = alloc_preg (cfg);
4672 int eclass_reg = alloc_preg (cfg);
4674 g_assert (!context_used);
4675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4680 if (klass->cast_class == mono_defaults.object_class) {
4681 int parent_reg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4683 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4686 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4688 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4690 } else if (klass->cast_class == mono_defaults.enum_class) {
4691 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4696 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4697 /* Check that the object is a vector too */
4698 int bounds_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4704 /* the is_null_bb target simply copies the input register to the output */
4705 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4707 } else if (mono_class_is_nullable (klass)) {
4708 g_assert (!context_used);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4710 /* the is_null_bb target simply copies the input register to the output */
4711 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4713 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4714 g_assert (!context_used);
4715 /* the remoting code is broken, access the class for now */
4716 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4717 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4719 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4720 cfg->exception_ptr = klass;
4723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4732 /* the is_null_bb target simply copies the input register to the output */
4733 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4738 MONO_START_BB (cfg, false_bb);
4740 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4743 MONO_START_BB (cfg, is_null_bb);
4745 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CEE_MONO_CISINST opcode (isinst with
 * remoting/transparent-proxy awareness); produces an integer result
 * (0 / 1 / 2, see the comment below).
 * NOTE(review): sparse extract — opening brace, #else/#endif lines,
 * 'tmp_reg'/'ins' declarations, dreg assignment and the return are elided.
 */
4751 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4753 /* This opcode takes as input an object reference and a class, and returns:
4754 0) if the object is an instance of the class,
4755 1) if the object is not instance of the class,
4756 2) if the object is a proxy whose type cannot be determined */
/* With remoting enabled, extra blocks handle the transparent-proxy cases. */
4759 #ifndef DISABLE_REMOTING
4760 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4762 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4764 int obj_reg = src->dreg;
4765 int dreg = alloc_ireg (cfg);
4767 #ifndef DISABLE_REMOTING
4768 int klass_reg = alloc_preg (cfg);
4771 NEW_BBLOCK (cfg, true_bb);
4772 NEW_BBLOCK (cfg, false_bb);
4773 NEW_BBLOCK (cfg, end_bb);
4774 #ifndef DISABLE_REMOTING
4775 NEW_BBLOCK (cfg, false2_bb);
4776 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL object -> "not an instance" (result 1). */
4779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4782 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4783 #ifndef DISABLE_REMOTING
4784 NEW_BBLOCK (cfg, interface_fail_bb);
4787 tmp_reg = alloc_preg (cfg);
4788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4789 #ifndef DISABLE_REMOTING
/* Interface check failed: see whether the object is a transparent proxy
 * whose type info cannot be determined (-> result 2). */
4790 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4791 MONO_START_BB (cfg, interface_fail_bb);
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4794 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4796 tmp_reg = alloc_preg (cfg);
4797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4801 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface klass: distinguish proxies from ordinary objects. */
4804 #ifndef DISABLE_REMOTING
4805 tmp_reg = alloc_preg (cfg);
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4809 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class' proxy_class instead. */
4810 tmp_reg = alloc_preg (cfg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4819 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4820 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4822 MONO_START_BB (cfg, no_proxy_bb);
4824 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4826 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: false=1, false2=2 (proxy, undeterminable), true=0. */
4830 MONO_START_BB (cfg, false_bb);
4832 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4835 #ifndef DISABLE_REMOTING
4836 MONO_START_BB (cfg, false2_bb);
4838 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4839 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4842 MONO_START_BB (cfg, true_bb);
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4846 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an ICONST-typed instruction for the stack. */
4849 MONO_INST_NEW (cfg, ins, OP_ICONST);
4851 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CEE_MONO_CCASTCLASS opcode (castclass with
 * remoting/transparent-proxy awareness); produces 0 or 1 (see comment
 * below) or throws InvalidCastException.
 * NOTE(review): sparse extract — opening brace, #else/#endif lines,
 * 'ins' declaration, dreg assignment and the return are elided.
 */
4857 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4859 /* This opcode takes as input an object reference and a class, and returns:
4860 0) if the object is an instance of the class,
4861 1) if the object is a proxy whose type cannot be determined
4862 an InvalidCastException exception is thrown otherwhise*/
4865 #ifndef DISABLE_REMOTING
4866 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4868 MonoBasicBlock *ok_result_bb;
4870 int obj_reg = src->dreg;
4871 int dreg = alloc_ireg (cfg);
4872 int tmp_reg = alloc_preg (cfg);
4874 #ifndef DISABLE_REMOTING
4875 int klass_reg = alloc_preg (cfg);
4876 NEW_BBLOCK (cfg, end_bb);
4879 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL casts succeed (result 0). */
4881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4882 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a descriptive exception. */
4884 save_cast_details (cfg, klass, obj_reg, FALSE);
4886 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4887 #ifndef DISABLE_REMOTING
4888 NEW_BBLOCK (cfg, interface_fail_bb);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface miss: only a transparent proxy with custom type info may
 * survive (result 1); anything else throws. */
4891 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4892 MONO_START_BB (cfg, interface_fail_bb);
4893 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4895 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4897 tmp_reg = alloc_preg (cfg);
4898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4900 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4902 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: a plain interface cast (throws on failure). */
4905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4906 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface klass: handle proxy vs. ordinary object. */
4910 #ifndef DISABLE_REMOTING
4911 NEW_BBLOCK (cfg, no_proxy_bb);
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4915 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4917 tmp_reg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4921 tmp_reg = alloc_preg (cfg);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy whose type cannot be verified here -> result 1 instead of throwing. */
4926 NEW_BBLOCK (cfg, fail_1_bb);
4928 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4930 MONO_START_BB (cfg, fail_1_bb);
4932 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4935 MONO_START_BB (cfg, no_proxy_bb);
4937 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4939 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4943 MONO_START_BB (cfg, ok_result_bb);
4945 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4947 #ifndef DISABLE_REMOTING
4948 MONO_START_BB (cfg, end_bb);
/* Wrap the result register for the evaluation stack. */
4952 MONO_INST_NEW (cfg, ins, OP_ICONST);
4954 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline implementation of Enum.HasFlag ():
 * loads the enum value, ANDs it with the flag, and compares the result
 * against the flag ((value & flag) == flag), yielding an I4 boolean.
 * Works on I4- or I8-sized underlying enum types (is_i4 selects the
 * 32/64-bit opcode variants).
 * NOTE(review): sparse extract — the switch arms selecting 'is_i4'
 * from enum_type->type and the final return are elided.
 */
4959 static G_GNUC_UNUSED MonoInst*
4960 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4962 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4963 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4966 switch (enum_type->type) {
4969 #if SIZEOF_REGISTER == 8
4981 MonoInst *load, *and_, *cmp, *ceq;
4982 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4984 int dest_reg = alloc_ireg (cfg);
4986 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4987 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4988 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4989 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4991 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4994 load = mono_decompose_opcode (cfg, load);
4995 and_ = mono_decompose_opcode (cfg, and_);
4996 cmp = mono_decompose_opcode (cfg, cmp);
4997 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate
 * object, fill in its target/method/method_code/invoke_impl fields and,
 * for virtual delegates, method_is_virtual. Remaining runtime checks are
 * deferred to the delegate trampoline.
 * NOTE(review): sparse extract — opening brace, several declarations
 * ('dreg', 'ptr', early-return/failure paths) and the final return of
 * 'obj' are elided.
 */
5005 * Returns NULL and set the cfg exception on error.
5007 static G_GNUC_UNUSED MonoInst*
5008 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5012 gpointer trampoline;
5013 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke thunk; bail out (elided path)
 * when none is available for this signature. */
5017 if (virtual_ && !cfg->llvm_only) {
5018 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5021 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5025 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5029 /* Inline the contents of mono_delegate_ctor */
5031 /* Set target field */
5032 /* Optimize away setting of NULL target */
5033 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target is a reference stored into a heap object: emit a write
 * barrier when the GC configuration requires them. */
5035 if (cfg->gen_write_barriers) {
5036 dreg = alloc_preg (cfg);
5037 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5038 emit_write_barrier (cfg, ptr, target);
5042 /* Set method field */
5043 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5047 * To avoid looking up the compiled code belonging to the target method
5048 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5049 * store it, and we fill it after the method has been compiled.
5051 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5052 MonoInst *code_slot_ins;
5055 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared path (elided branch): look up / allocate the slot in the
 * domain's method_code_hash under the domain lock. */
5057 domain = mono_domain_get ();
5058 mono_domain_lock (domain);
5059 if (!domain_jit_info (domain)->method_code_hash)
5060 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5061 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5063 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5064 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5066 mono_domain_unlock (domain);
5068 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode: initialize the delegate via an icall instead of a
 * trampoline. */
5073 if (cfg->llvm_only) {
5074 MonoInst *args [16];
5079 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5080 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5083 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: reference the trampoline via a patch-info constant so it can be
 * resolved at load time. */
5089 if (cfg->compile_aot) {
5090 MonoDelegateClassMethodPair *del_tramp;
5092 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5093 del_tramp->klass = klass;
5094 del_tramp->method = context_used ? NULL : method;
5095 del_tramp->is_virtual = virtual_;
5096 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5099 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5101 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5102 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5105 /* Set invoke_impl field */
5107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path (elided branch): read invoke_impl / method_ptr out of
 * the MonoDelegateTrampInfo and store them into the delegate. */
5109 dreg = alloc_preg (cfg);
5110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5113 dreg = alloc_preg (cfg);
5114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5118 dreg = alloc_preg (cfg);
5119 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5120 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5122 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newobj' on a multi-dimensional array
 * constructor with RANK dimension arguments (in SP), via the vararg
 * mono_array_new_va icall. Disables LLVM for the method since the icall
 * needs a vararg calling convention.
 * NOTE(review): sparse extract — opening brace and some lines elided.
 */
5128 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5130 MonoJitICallInfo *info;
5132 /* Need to register the icall so it gets an icall wrapper */
5133 info = mono_get_array_new_va_icall (rank);
5135 cfg->flags |= MONO_CFG_HAS_VARARGS;
5137 /* mono_array_new_va () needs a vararg calling convention */
5138 cfg->exception_message = g_strdup ("array-new");
5139 cfg->disable_llvm = TRUE;
5141 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5142 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * NOTE(review): sparse extract — opening brace, several 'args'
 * assignments, else branches and the final return are elided; comments
 * only describe the visible lines.
 */
5146 * handle_constrained_gsharedvt_call:
5148 * Handle constrained calls where the receiver is a gsharedvt type.
5149 * Return the instruction representing the call. Set the cfg exception on failure.
5152 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5153 gboolean *ref_emit_widen)
5155 MonoInst *ins = NULL;
5156 gboolean emit_widen = *ref_emit_widen;
5159 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5160 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5161 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of signatures is supported by the icall fallback;
 * everything else takes the (elided) GSHAREDVT_FAILURE path. */
5163 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5164 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5165 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5166 MonoInst *args [16];
5169 * This case handles calls to
5170 * - object:ToString()/Equals()/GetHashCode(),
5171 * - System.IComparable<T>:CompareTo()
5172 * - System.IEquatable<T>:Equals ()
5173 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = the target method (through the rgctx in shared code). */
5177 if (mono_method_check_context_used (cmethod))
5178 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5180 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5181 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5183 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5184 if (fsig->hasthis && fsig->param_count) {
5185 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5186 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5187 ins->dreg = alloc_preg (cfg);
5188 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5189 MONO_ADD_INS (cfg->cbb, ins);
5192 if (mini_is_gsharedvt_type (fsig->params [0])) {
5193 int addr_reg, deref_arg_reg;
5195 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5196 deref_arg_reg = alloc_preg (cfg);
5197 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5198 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5200 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5201 addr_reg = ins->dreg;
5202 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5204 EMIT_NEW_ICONST (cfg, args [3], 0);
5205 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5208 EMIT_NEW_ICONST (cfg, args [3], 0);
5209 EMIT_NEW_ICONST (cfg, args [4], 0);
5211 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* Unbox/deref the icall's boxed return value as needed. */
5214 if (mini_is_gsharedvt_type (fsig->ret)) {
5215 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5216 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5221 MONO_ADD_INS (cfg->cbb, add);
5223 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5224 MONO_ADD_INS (cfg->cbb, ins);
5225 /* ins represents the call result */
5228 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5231 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the
 * entry basic block (once per method), and add a dummy use in the exit
 * block so liveness keeps the variable alive for the whole method.
 * NOTE(review): sparse extract — opening brace, the early 'return' and
 * some braces are elided.
 */
5240 mono_emit_load_got_addr (MonoCompile *cfg)
5242 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already set up. */
5244 if (!cfg->got_var || cfg->got_var_allocated)
5247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5248 getaddr->cil_code = cfg->header->code;
5249 getaddr->dreg = cfg->got_var->dreg;
5251 /* Add it to the start of the first bblock */
5252 if (cfg->bb_entry->code) {
5253 getaddr->next = cfg->bb_entry->code;
5254 cfg->bb_entry->code = getaddr;
5257 MONO_ADD_INS (cfg->bb_entry, getaddr);
5259 cfg->got_var_allocated = TRUE;
5262 * Add a dummy use to keep the got_var alive, since real uses might
5263 * only be generated by the back ends.
5264 * Add it to end_bblock, so the variable's lifetime covers the whole
5266 * It would be better to make the usage of the got var explicit in all
5267 * cases when the backend needs it (i.e. calls, throw etc.), so this
5268 * wouldn't be needed.
5270 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5271 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size a method may have to be considered for inlining;
 * initialized lazily from MONO_INLINELIMIT (default INLINE_LENGTH_LIMIT). */
5274 static int inline_limit;
/* TRUE once inline_limit has been initialized. */
5275 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled:
 * checks compile flags, inline depth, method attributes, code size
 * against the (env-configurable) inline limit, class-initialization
 * constraints, soft-float restrictions and the per-cfg dont_inline list.
 * NOTE(review): sparse extract — opening brace, TRUE/FALSE returns,
 * several declarations ('vtable', 'error', 'i') and closing braces are
 * elided; comments only describe the visible lines.
 */
5278 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5280 MonoMethodHeaderSummary header;
5282 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5283 MonoMethodSignature *sig = mono_method_signature (method);
5287 if (cfg->disable_inline)
5292 if (cfg->inline_depth > 10)
5295 if (!mono_method_get_header_summary (method, &header))
5298 /*runtime, icall and pinvoke are checked by summary call*/
5299 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5300 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5301 (mono_class_is_marshalbyref (method->klass)) ||
5305 /* also consider num_locals? */
5306 /* Do the size check early to avoid creating vtables */
5307 if (!inline_limit_inited) {
5308 if (g_getenv ("MONO_INLINELIMIT"))
5309 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5311 inline_limit = INLINE_LENGTH_LIMIT;
5312 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5314 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5318 * if we can initialize the class of the method right away, we do,
5319 * otherwise we don't allow inlining if the class needs initialization,
5320 * since it would mean inserting a call to mono_runtime_class_init()
5321 * inside the inlined code
5323 if (!(cfg->opt & MONO_OPT_SHARED)) {
5324 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5325 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5326 vtable = mono_class_vtable (cfg->domain, method->klass);
5329 if (!cfg->compile_aot) {
5331 if (!mono_runtime_class_init_full (vtable, &error))
5332 mono_error_raise_exception (&error); /* FIXME don't raise here */
5334 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5335 if (cfg->run_cctors && method->klass->has_cctor) {
5336 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5337 if (!method->klass->runtime_info)
5338 /* No vtable created yet */
5340 vtable = mono_class_vtable (cfg->domain, method->klass);
5343 /* This makes so that inline cannot trigger */
5344 /* .cctors: too many apps depend on them */
5345 /* running with a specific order... */
5346 if (! vtable->initialized)
5349 if (!mono_runtime_class_init_full (vtable, &error))
5350 mono_error_raise_exception (&error); /* FIXME don't raise here */
5352 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5353 if (!method->klass->runtime_info)
5354 /* No vtable created yet */
5356 vtable = mono_class_vtable (cfg->domain, method->klass);
5359 if (!vtable->initialized)
5364 * If we're compiling for shared code
5365 * the cctor will need to be run at aot method load time, for example,
5366 * or at the end of the compilation of the inlining method.
5368 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5372 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5373 if (mono_arch_is_soft_float ()) {
5375 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5377 for (i = 0; i < sig->param_count; ++i)
5378 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compilation blacklist (e.g. recursion guard). */
5383 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access in METHOD requires KLASS's
 * class constructor to be run first (i.e. whether the JIT must emit an
 * init check before the access).
 * NOTE(review): sparse extract — opening brace and the TRUE/FALSE
 * returns between the conditions are elided.
 */
5390 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* JIT case: if the vtable is already initialized nothing more is needed. */
5392 if (!cfg->compile_aot) {
5394 if (vtable->initialized)
5398 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5399 if (cfg->method == method)
5403 if (!mono_class_needs_cctor_run (klass, method))
5406 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5407 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element
 * access: &arr->vector [index * element_size], with an optional bounds
 * check (BCHECK). Uses an x86/amd64 LEA fast path for power-of-two
 * element sizes, and an rgctx-provided element size for gsharedvt
 * variable-size element classes.
 * NOTE(review): sparse extract — opening brace, some declarations
 * ('ins', 'size', 'context_used'), else lines and the final return are
 * elided.
 */
5414 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5418 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5421 if (mini_is_gsharedvt_variable_klass (klass)) {
5424 mono_class_init (klass);
5425 size = mono_class_array_element_size (klass);
5428 mult_reg = alloc_preg (cfg);
5429 array_reg = arr->dreg;
5430 index_reg = index->dreg;
5432 #if SIZEOF_REGISTER == 8
5433 /* The array reg is 64 bits but the index reg is only 32 */
5434 if (COMPILE_LLVM (cfg)) {
5436 index2_reg = index_reg;
5438 index2_reg = alloc_preg (cfg);
5439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets (elided #else): truncate an I8 index to I4. */
5442 if (index->type == STACK_I8) {
5443 index2_reg = alloc_preg (cfg);
5444 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5446 index2_reg = index_reg;
5451 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5453 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5454 if (size == 1 || size == 2 || size == 4 || size == 8) {
5455 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5457 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5458 ins->klass = mono_class_get_element_class (klass);
5459 ins->type = STACK_MP;
5465 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size comes from the runtime generic context. */
5468 MonoInst *rgctx_ins;
5471 g_assert (cfg->gshared);
5472 context_used = mini_class_check_context_used (cfg, klass);
5473 g_assert (context_used);
5474 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5475 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5479 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5480 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5481 ins->klass = mono_class_get_element_class (klass);
5482 ins->type = STACK_MP;
5483 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for a two-dimensional array element
 * access. Reads the per-dimension lower bounds and lengths from the
 * MonoArrayBounds array, range-checks both (adjusted) indices, then
 * computes &arr->vector [((realidx1 * len2) + realidx2) * size].
 * NOTE(review): sparse extract — opening brace, declarations of 'ins'
 * and 'size', some #else/#endif lines and the final return are elided.
 */
5489 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5491 int bounds_reg = alloc_preg (cfg);
5492 int add_reg = alloc_ireg_mp (cfg);
5493 int mult_reg = alloc_preg (cfg);
5494 int mult2_reg = alloc_preg (cfg);
5495 int low1_reg = alloc_preg (cfg);
5496 int low2_reg = alloc_preg (cfg);
5497 int high1_reg = alloc_preg (cfg);
5498 int high2_reg = alloc_preg (cfg);
5499 int realidx1_reg = alloc_preg (cfg);
5500 int realidx2_reg = alloc_preg (cfg);
5501 int sum_reg = alloc_preg (cfg);
5502 int index1, index2, tmpreg;
5506 mono_class_init (klass);
5507 size = mono_class_array_element_size (klass);
5509 index1 = index_ins1->dreg;
5510 index2 = index_ins2->dreg;
5512 #if SIZEOF_REGISTER == 8
5513 /* The array reg is 64 bits but the index reg is only 32 */
5514 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit index registers to pointer width. */
5517 tmpreg = alloc_preg (cfg);
5518 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5520 tmpreg = alloc_preg (cfg);
5521 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5525 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5529 /* range checking */
5530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5531 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; check < length. */
5533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5534 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5535 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5537 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5539 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: the second MonoArrayBounds entry follows the first. */
5541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5542 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5543 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5545 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5547 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize: offset = ((realidx1 * len2) + realidx2) * size + vector. */
5549 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5550 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5552 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5553 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5555 ins->type = STACK_MP;
5557 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an element of a multi-dimensional array:
 *   sp [0] is the array, sp [1..rank] are the per-dimension indexes. For a
 *   setter (IS_SET) the trailing value argument is excluded from the rank.
 *   Fast paths: rank 1, and rank 2 when the backend has real mul/div and
 *   intrinsics are enabled; otherwise the generic Address marshalling wrapper.
 *   NOTE(review): several structural lines (rank == 1 guard, gsharedvt
 *   fallthrough, final return) are elided in this chunk.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
MonoMethod *addr_method;
/* element class of the array type the Get/Set/Address method belongs to */
MonoClass *eclass = cmethod->klass->element_class;
/* a setter carries the value to store as its last parameter; it is not an index */
rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* rank 1: single bounds-checked address computation */
return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
/* emit_ldelema_2 depends on OP_LMUL */
if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* variable-size (gsharedvt) element types cannot use the inline fast paths */
if (mini_is_gsharedvt_variable_klass (eclass))
element_size = mono_class_array_element_size (eclass);
/* generic slow path: call the managed Address helper built by the marshaller */
addr_method = mono_marshal_get_array_address (rank, element_size);
addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5591 static MonoBreakPolicy
5592 always_insert_breakpoint (MonoMethod *method)
5594 return MONO_BREAK_POLICY_ALWAYS;
/* Active break policy callback; embedders override it via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5600 * mono_set_break_policy:
5601 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
5604 * (both break IL instructions and Debugger.Break () method calls), for example
5605 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5606 * untrusted or semi-trusted code.
5608 * @policy_callback will be called every time a break point instruction needs to
5609 * be inserted with the method argument being the method that calls Debugger.Break()
5610 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5611 * if it wants the breakpoint to not be effective in the given method.
5612 * #MONO_BREAK_POLICY_ALWAYS is the default.
5615 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5617 if (policy_callback)
5618 break_policy_func = policy_callback;
5620 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (name keeps a historical typo; renaming would churn callers)
 *
 *   Consult the registered break policy to decide whether the JIT should
 *   materialize a breakpoint for METHOD (IL `break` or Debugger.Break ()).
 *   NOTE(review): the per-case return statements are elided in this chunk;
 *   only MONO_BREAK_POLICY_ALWAYS is expected to answer TRUE.
 */
should_insert_brekpoint (MonoMethod *method) {
switch (break_policy_func (method)) {
case MONO_BREAK_POLICY_ALWAYS:
case MONO_BREAK_POLICY_NEVER:
case MONO_BREAK_POLICY_ON_DBG:
/* mdb (the old Mono debugger) was removed; this policy value is now an error */
g_warning ("mdb no longer supported");
/* default case: the callback returned a value outside the enum */
g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   ARGS: [0] = array, [1] = index, [2] = byref value slot.
 *   IS_SET copies *args [2] into the array element; otherwise the element is
 *   copied out into *args [2].
 *   NOTE(review): the if (is_set) / else structure is elided in this chunk;
 *   the two load/store pairs below belong to the two branches.
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
MonoInst *addr, *store, *load;
/* element class comes from the byref value parameter of the icall signature */
MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
/* the bounds check is already done by the callers */
addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set path: *args [2] -> array element */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* storing an object reference into the heap needs a GC write barrier */
if (mini_type_is_reference (fsig->params [2]))
emit_write_barrier (cfg, addr, load);
/* get path: array element -> *args [2] */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5662 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5664 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for a stelem on a rank-1 array: sp [0] = array, sp [1] = index,
 *   sp [2] = value. SAFETY_CHECKS enables bounds/covariance checking.
 *   NOTE(review): several structural lines (early bail-out returns, iargs
 *   assignments, else arms, final return) are elided in this chunk.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a possibly non-null object reference needs an array covariance
 * check: route through the virtual stelemref helper. */
if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
MonoInst *iargs [3];
/* the helper is dispatched through a vtable slot, which must exist */
mono_class_setup_vtable (obj_array);
g_assert (helper->slot);
/* NOTE(review): bail-out bodies for non-object operands are elided here */
if (sp [0]->type != STACK_OBJ)
if (sp [2]->type != STACK_OBJ)
/* virtual call: dispatch on the dynamic type of the array (sp [0]) */
return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* variable-size (gsharedvt) element: compute the address, store as valuetype */
if (mini_is_gsharedvt_variable_klass (klass)) {
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
ins->opcode = OP_STOREV_MEMBASE;
/* constant index: fold the element offset at compile time */
} else if (sp [1]->opcode == OP_ICONST) {
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* the array reg is 64 bits wide but the index reg holds only 32 */
if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* general case: compute the element address, then store through it */
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
/* reference stores into the heap need a GC write barrier */
if (generic_class_is_reference_type (cfg, klass))
emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: element access with the
 *   safety checks disabled. IS_SET selects store vs. load.
 *   NOTE(review): the is_set if/else structure and final return are elided
 *   in this chunk; the two eklass assignments belong to the two branches.
 */
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* store: element type comes from the value parameter */
eklass = mono_class_from_mono_type (fsig->params [2]);
/* load: element type comes from the return type */
eklass = mono_class_from_mono_type (fsig->ret);
/* FALSE: explicitly skip the bounds/covariance checks */
return emit_array_store (cfg, eklass, args, FALSE);
MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5741 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5744 int param_size, return_size;
5746 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5747 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5749 if (cfg->verbose_level > 3)
5750 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5752 //Don't allow mixing reference types with value types
5753 if (param_klass->valuetype != return_klass->valuetype) {
5754 if (cfg->verbose_level > 3)
5755 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5759 if (!param_klass->valuetype) {
5760 if (cfg->verbose_level > 3)
5761 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5766 if (param_klass->has_references || return_klass->has_references)
5769 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5770 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5771 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5772 if (cfg->verbose_level > 3)
5773 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5777 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5778 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5779 if (cfg->verbose_level > 3)
5780 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5784 param_size = mono_class_value_size (param_klass, &align);
5785 return_size = mono_class_value_size (return_klass, &align);
5787 //We can do it if sizes match
5788 if (param_size == return_size) {
5789 if (cfg->verbose_level > 3)
5790 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5794 //No simple way to handle struct if sizes don't match
5795 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5796 if (cfg->verbose_level > 3)
5797 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5802 * Same reg size category.
5803 * A quick note on why we don't require widening here.
5804 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5806 * Since the source value comes from a function argument, the JIT will already have
5807 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5809 if (param_size <= 4 && return_size <= 4) {
5810 if (cfg->verbose_level > 3)
5811 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *   Intrinsic for Array.UnsafeMov<S,R>: when S and R are move-compatible
 *   (see is_unsafe_mov_compatible), reuse args [0] unchanged instead of
 *   emitting a call. NOTE(review): the return statements (NULL bail-out and
 *   the two "return args [0]" paths) are elided in this chunk.
 */
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* variable-size (gsharedvt) return types cannot be handled as a plain move */
if (mini_is_gsharedvt_variable_type (fsig->ret))
//Valuetypes that are semantically equivalent or numbers that can be widened to
if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
//Arrays of valuetypes that are semantically equivalent
if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with intrinsic IR: SIMD ctor
 *   intrinsics first (when enabled), then native-types intrinsics.
 *   NOTE(review): the #endif, the early return of a successful SIMD
 *   intrinsic, and the closing brace are elided in this chunk.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
#ifdef MONO_ARCH_SIMD_INTRINSICS
MonoInst *ins = NULL;
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5855 emit_memory_barrier (MonoCompile *cfg, int kind)
5857 MonoInst *ins = NULL;
5858 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5859 MONO_ADD_INS (cfg->cbb, ins);
5860 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Method-to-opcode intrinsics used when compiling with the LLVM backend:
 *   Math.Sin/Cos/Sqrt/Abs map to single FP opcodes; Math.Min/Max map to
 *   conditional-move opcodes when MONO_OPT_CMOV is enabled.
 *   NOTE(review): the opcode assignments (OP_SIN, OP_IMIN, ...) and the
 *   final return are elided in this chunk.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
MonoInst *ins = NULL;
/* The LLVM backend supports these intrinsics */
if (cmethod->klass == mono_defaults.math_class) {
if (strcmp (cmethod->name, "Sin") == 0) {
} else if (strcmp (cmethod->name, "Cos") == 0) {
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for double arguments */
} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary FP intrinsic: one arg in, one R8 result out */
if (opcode && fsig->param_count == 1) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = args [0]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->opt & MONO_OPT_CMOV) {
if (strcmp (cmethod->name, "Min") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
/* NOTE(review): this is `if`, not `else if` — harmless since the I4 and
 * U4 checks are mutually exclusive, but it looks accidental; confirm */
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMIN_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMIN_UN;
} else if (strcmp (cmethod->name, "Max") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
/* NOTE(review): same `if` vs `else if` quirk as the Min branch above */
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMAX_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMAX_UN;
/* binary min/max intrinsic: stack type follows the 32/64-bit operand width */
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics whose emitted IR is valid across generic-sharing contexts;
 *   currently the Array.UnsafeStore/UnsafeLoad/UnsafeMov icalls.
 *   NOTE(review): the trailing "no match" return is elided in this chunk.
 */
mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
if (cmethod->klass == mono_defaults.array_class) {
if (strcmp (cmethod->name, "UnsafeStore") == 0)
return emit_array_unsafe_access (cfg, fsig, args, TRUE);
else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
return emit_array_unsafe_access (cfg, fsig, args, FALSE);
else if (strcmp (cmethod->name, "UnsafeMov") == 0)
return emit_array_unsafe_mov (cfg, fsig, args);
5943 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5945 MonoInst *ins = NULL;
5947 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5949 if (cmethod->klass == mono_defaults.string_class) {
5950 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5951 int dreg = alloc_ireg (cfg);
5952 int index_reg = alloc_preg (cfg);
5953 int add_reg = alloc_preg (cfg);
5955 #if SIZEOF_REGISTER == 8
5956 if (COMPILE_LLVM (cfg)) {
5957 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5959 /* The array reg is 64 bits but the index reg is only 32 */
5960 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5963 index_reg = args [1]->dreg;
5965 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5967 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5968 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5969 add_reg = ins->dreg;
5970 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5973 int mult_reg = alloc_preg (cfg);
5974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5975 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5976 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5977 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5979 type_from_op (cfg, ins, NULL, NULL);
5981 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5982 int dreg = alloc_ireg (cfg);
5983 /* Decompose later to allow more optimizations */
5984 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5985 ins->type = STACK_I4;
5986 ins->flags |= MONO_INST_FAULT;
5987 cfg->cbb->has_array_access = TRUE;
5988 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5993 } else if (cmethod->klass == mono_defaults.object_class) {
5994 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5995 int dreg = alloc_ireg_ref (cfg);
5996 int vt_reg = alloc_preg (cfg);
5997 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5998 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5999 type_from_op (cfg, ins, NULL, NULL);
6002 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6003 int dreg = alloc_ireg (cfg);
6004 int t1 = alloc_ireg (cfg);
6006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6007 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6008 ins->type = STACK_I4;
6011 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6012 MONO_INST_NEW (cfg, ins, OP_NOP);
6013 MONO_ADD_INS (cfg->cbb, ins);
6017 } else if (cmethod->klass == mono_defaults.array_class) {
6018 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6019 return emit_array_generic_access (cfg, fsig, args, FALSE);
6020 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6021 return emit_array_generic_access (cfg, fsig, args, TRUE);
6023 #ifndef MONO_BIG_ARRAYS
6025 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6028 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6029 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6030 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6031 int dreg = alloc_ireg (cfg);
6032 int bounds_reg = alloc_ireg_mp (cfg);
6033 MonoBasicBlock *end_bb, *szarray_bb;
6034 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6036 NEW_BBLOCK (cfg, end_bb);
6037 NEW_BBLOCK (cfg, szarray_bb);
6039 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6040 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6043 /* Non-szarray case */
6045 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6046 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6051 MONO_START_BB (cfg, szarray_bb);
6054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6055 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6057 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6058 MONO_START_BB (cfg, end_bb);
6060 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6061 ins->type = STACK_I4;
6067 if (cmethod->name [0] != 'g')
6070 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6071 int dreg = alloc_ireg (cfg);
6072 int vtable_reg = alloc_preg (cfg);
6073 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6074 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6076 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6077 type_from_op (cfg, ins, NULL, NULL);
6080 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6081 int dreg = alloc_ireg (cfg);
6083 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6084 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6085 type_from_op (cfg, ins, NULL, NULL);
6090 } else if (cmethod->klass == runtime_helpers_class) {
6091 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6092 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6096 } else if (cmethod->klass == mono_defaults.monitor_class) {
6097 gboolean is_enter = FALSE;
6098 gboolean is_v4 = FALSE;
6100 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6104 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6109 * To make async stack traces work, icalls which can block should have a wrapper.
6110 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6112 MonoBasicBlock *end_bb;
6114 NEW_BBLOCK (cfg, end_bb);
6116 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6119 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6120 MONO_START_BB (cfg, end_bb);
6123 } else if (cmethod->klass == mono_defaults.thread_class) {
6124 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6125 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6126 MONO_ADD_INS (cfg->cbb, ins);
6128 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6129 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6130 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6132 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6134 if (fsig->params [0]->type == MONO_TYPE_I1)
6135 opcode = OP_LOADI1_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_U1)
6137 opcode = OP_LOADU1_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_I2)
6139 opcode = OP_LOADI2_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_U2)
6141 opcode = OP_LOADU2_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_I4)
6143 opcode = OP_LOADI4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_U4)
6145 opcode = OP_LOADU4_MEMBASE;
6146 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6147 opcode = OP_LOADI8_MEMBASE;
6148 else if (fsig->params [0]->type == MONO_TYPE_R4)
6149 opcode = OP_LOADR4_MEMBASE;
6150 else if (fsig->params [0]->type == MONO_TYPE_R8)
6151 opcode = OP_LOADR8_MEMBASE;
6152 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6153 opcode = OP_LOAD_MEMBASE;
6156 MONO_INST_NEW (cfg, ins, opcode);
6157 ins->inst_basereg = args [0]->dreg;
6158 ins->inst_offset = 0;
6159 MONO_ADD_INS (cfg->cbb, ins);
6161 switch (fsig->params [0]->type) {
6168 ins->dreg = mono_alloc_ireg (cfg);
6169 ins->type = STACK_I4;
6173 ins->dreg = mono_alloc_lreg (cfg);
6174 ins->type = STACK_I8;
6178 ins->dreg = mono_alloc_ireg (cfg);
6179 #if SIZEOF_REGISTER == 8
6180 ins->type = STACK_I8;
6182 ins->type = STACK_I4;
6187 ins->dreg = mono_alloc_freg (cfg);
6188 ins->type = STACK_R8;
6191 g_assert (mini_type_is_reference (fsig->params [0]));
6192 ins->dreg = mono_alloc_ireg_ref (cfg);
6193 ins->type = STACK_OBJ;
6197 if (opcode == OP_LOADI8_MEMBASE)
6198 ins = mono_decompose_opcode (cfg, ins);
6200 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6204 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6206 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6208 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6209 opcode = OP_STOREI1_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6211 opcode = OP_STOREI2_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6213 opcode = OP_STOREI4_MEMBASE_REG;
6214 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6215 opcode = OP_STOREI8_MEMBASE_REG;
6216 else if (fsig->params [0]->type == MONO_TYPE_R4)
6217 opcode = OP_STORER4_MEMBASE_REG;
6218 else if (fsig->params [0]->type == MONO_TYPE_R8)
6219 opcode = OP_STORER8_MEMBASE_REG;
6220 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6221 opcode = OP_STORE_MEMBASE_REG;
6224 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6226 MONO_INST_NEW (cfg, ins, opcode);
6227 ins->sreg1 = args [1]->dreg;
6228 ins->inst_destbasereg = args [0]->dreg;
6229 ins->inst_offset = 0;
6230 MONO_ADD_INS (cfg->cbb, ins);
6232 if (opcode == OP_STOREI8_MEMBASE_REG)
6233 ins = mono_decompose_opcode (cfg, ins);
6238 } else if (cmethod->klass->image == mono_defaults.corlib &&
6239 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6240 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6243 #if SIZEOF_REGISTER == 8
6244 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6245 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6246 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6247 ins->dreg = mono_alloc_preg (cfg);
6248 ins->sreg1 = args [0]->dreg;
6249 ins->type = STACK_I8;
6250 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6251 MONO_ADD_INS (cfg->cbb, ins);
6255 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6257 /* 64 bit reads are already atomic */
6258 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6259 load_ins->dreg = mono_alloc_preg (cfg);
6260 load_ins->inst_basereg = args [0]->dreg;
6261 load_ins->inst_offset = 0;
6262 load_ins->type = STACK_I8;
6263 MONO_ADD_INS (cfg->cbb, load_ins);
6265 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6272 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6273 MonoInst *ins_iconst;
6276 if (fsig->params [0]->type == MONO_TYPE_I4) {
6277 opcode = OP_ATOMIC_ADD_I4;
6278 cfg->has_atomic_add_i4 = TRUE;
6280 #if SIZEOF_REGISTER == 8
6281 else if (fsig->params [0]->type == MONO_TYPE_I8)
6282 opcode = OP_ATOMIC_ADD_I8;
6285 if (!mono_arch_opcode_supported (opcode))
6287 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6288 ins_iconst->inst_c0 = 1;
6289 ins_iconst->dreg = mono_alloc_ireg (cfg);
6290 MONO_ADD_INS (cfg->cbb, ins_iconst);
6292 MONO_INST_NEW (cfg, ins, opcode);
6293 ins->dreg = mono_alloc_ireg (cfg);
6294 ins->inst_basereg = args [0]->dreg;
6295 ins->inst_offset = 0;
6296 ins->sreg2 = ins_iconst->dreg;
6297 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6298 MONO_ADD_INS (cfg->cbb, ins);
6300 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6301 MonoInst *ins_iconst;
6304 if (fsig->params [0]->type == MONO_TYPE_I4) {
6305 opcode = OP_ATOMIC_ADD_I4;
6306 cfg->has_atomic_add_i4 = TRUE;
6308 #if SIZEOF_REGISTER == 8
6309 else if (fsig->params [0]->type == MONO_TYPE_I8)
6310 opcode = OP_ATOMIC_ADD_I8;
6313 if (!mono_arch_opcode_supported (opcode))
6315 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6316 ins_iconst->inst_c0 = -1;
6317 ins_iconst->dreg = mono_alloc_ireg (cfg);
6318 MONO_ADD_INS (cfg->cbb, ins_iconst);
6320 MONO_INST_NEW (cfg, ins, opcode);
6321 ins->dreg = mono_alloc_ireg (cfg);
6322 ins->inst_basereg = args [0]->dreg;
6323 ins->inst_offset = 0;
6324 ins->sreg2 = ins_iconst->dreg;
6325 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6326 MONO_ADD_INS (cfg->cbb, ins);
6328 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6331 if (fsig->params [0]->type == MONO_TYPE_I4) {
6332 opcode = OP_ATOMIC_ADD_I4;
6333 cfg->has_atomic_add_i4 = TRUE;
6335 #if SIZEOF_REGISTER == 8
6336 else if (fsig->params [0]->type == MONO_TYPE_I8)
6337 opcode = OP_ATOMIC_ADD_I8;
6340 if (!mono_arch_opcode_supported (opcode))
6342 MONO_INST_NEW (cfg, ins, opcode);
6343 ins->dreg = mono_alloc_ireg (cfg);
6344 ins->inst_basereg = args [0]->dreg;
6345 ins->inst_offset = 0;
6346 ins->sreg2 = args [1]->dreg;
6347 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6348 MONO_ADD_INS (cfg->cbb, ins);
6351 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6352 MonoInst *f2i = NULL, *i2f;
6353 guint32 opcode, f2i_opcode, i2f_opcode;
6354 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6355 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6357 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6358 fsig->params [0]->type == MONO_TYPE_R4) {
6359 opcode = OP_ATOMIC_EXCHANGE_I4;
6360 f2i_opcode = OP_MOVE_F_TO_I4;
6361 i2f_opcode = OP_MOVE_I4_TO_F;
6362 cfg->has_atomic_exchange_i4 = TRUE;
6364 #if SIZEOF_REGISTER == 8
6366 fsig->params [0]->type == MONO_TYPE_I8 ||
6367 fsig->params [0]->type == MONO_TYPE_R8 ||
6368 fsig->params [0]->type == MONO_TYPE_I) {
6369 opcode = OP_ATOMIC_EXCHANGE_I8;
6370 f2i_opcode = OP_MOVE_F_TO_I8;
6371 i2f_opcode = OP_MOVE_I8_TO_F;
6374 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6375 opcode = OP_ATOMIC_EXCHANGE_I4;
6376 cfg->has_atomic_exchange_i4 = TRUE;
6382 if (!mono_arch_opcode_supported (opcode))
6386 /* TODO: Decompose these opcodes instead of bailing here. */
6387 if (COMPILE_SOFT_FLOAT (cfg))
6390 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6391 f2i->dreg = mono_alloc_ireg (cfg);
6392 f2i->sreg1 = args [1]->dreg;
6393 if (f2i_opcode == OP_MOVE_F_TO_I4)
6394 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6395 MONO_ADD_INS (cfg->cbb, f2i);
6398 MONO_INST_NEW (cfg, ins, opcode);
6399 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6400 ins->inst_basereg = args [0]->dreg;
6401 ins->inst_offset = 0;
6402 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6403 MONO_ADD_INS (cfg->cbb, ins);
6405 switch (fsig->params [0]->type) {
6407 ins->type = STACK_I4;
6410 ins->type = STACK_I8;
6413 #if SIZEOF_REGISTER == 8
6414 ins->type = STACK_I8;
6416 ins->type = STACK_I4;
6421 ins->type = STACK_R8;
6424 g_assert (mini_type_is_reference (fsig->params [0]));
6425 ins->type = STACK_OBJ;
6430 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6431 i2f->dreg = mono_alloc_freg (cfg);
6432 i2f->sreg1 = ins->dreg;
6433 i2f->type = STACK_R8;
6434 if (i2f_opcode == OP_MOVE_I4_TO_F)
6435 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6436 MONO_ADD_INS (cfg->cbb, i2f);
6441 if (cfg->gen_write_barriers && is_ref)
6442 emit_write_barrier (cfg, args [0], args [1]);
6444 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6445 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6446 guint32 opcode, f2i_opcode, i2f_opcode;
6447 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6448 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6450 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6451 fsig->params [1]->type == MONO_TYPE_R4) {
6452 opcode = OP_ATOMIC_CAS_I4;
6453 f2i_opcode = OP_MOVE_F_TO_I4;
6454 i2f_opcode = OP_MOVE_I4_TO_F;
6455 cfg->has_atomic_cas_i4 = TRUE;
6457 #if SIZEOF_REGISTER == 8
6459 fsig->params [1]->type == MONO_TYPE_I8 ||
6460 fsig->params [1]->type == MONO_TYPE_R8 ||
6461 fsig->params [1]->type == MONO_TYPE_I) {
6462 opcode = OP_ATOMIC_CAS_I8;
6463 f2i_opcode = OP_MOVE_F_TO_I8;
6464 i2f_opcode = OP_MOVE_I8_TO_F;
6467 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6468 opcode = OP_ATOMIC_CAS_I4;
6469 cfg->has_atomic_cas_i4 = TRUE;
6475 if (!mono_arch_opcode_supported (opcode))
6479 /* TODO: Decompose these opcodes instead of bailing here. */
6480 if (COMPILE_SOFT_FLOAT (cfg))
6483 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6484 f2i_new->dreg = mono_alloc_ireg (cfg);
6485 f2i_new->sreg1 = args [1]->dreg;
6486 if (f2i_opcode == OP_MOVE_F_TO_I4)
6487 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6488 MONO_ADD_INS (cfg->cbb, f2i_new);
6490 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6491 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6492 f2i_cmp->sreg1 = args [2]->dreg;
6493 if (f2i_opcode == OP_MOVE_F_TO_I4)
6494 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6495 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6498 MONO_INST_NEW (cfg, ins, opcode);
6499 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6500 ins->sreg1 = args [0]->dreg;
6501 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6502 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6503 MONO_ADD_INS (cfg->cbb, ins);
6505 switch (fsig->params [1]->type) {
6507 ins->type = STACK_I4;
6510 ins->type = STACK_I8;
6513 #if SIZEOF_REGISTER == 8
6514 ins->type = STACK_I8;
6516 ins->type = STACK_I4;
6520 ins->type = cfg->r4_stack_type;
6523 ins->type = STACK_R8;
6526 g_assert (mini_type_is_reference (fsig->params [1]));
6527 ins->type = STACK_OBJ;
6532 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6533 i2f->dreg = mono_alloc_freg (cfg);
6534 i2f->sreg1 = ins->dreg;
6535 i2f->type = STACK_R8;
6536 if (i2f_opcode == OP_MOVE_I4_TO_F)
6537 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6538 MONO_ADD_INS (cfg->cbb, i2f);
6543 if (cfg->gen_write_barriers && is_ref)
6544 emit_write_barrier (cfg, args [0], args [1]);
6546 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6547 fsig->params [1]->type == MONO_TYPE_I4) {
6548 MonoInst *cmp, *ceq;
6550 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6553 /* int32 r = CAS (location, value, comparand); */
6554 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6555 ins->dreg = alloc_ireg (cfg);
6556 ins->sreg1 = args [0]->dreg;
6557 ins->sreg2 = args [1]->dreg;
6558 ins->sreg3 = args [2]->dreg;
6559 ins->type = STACK_I4;
6560 MONO_ADD_INS (cfg->cbb, ins);
6562 /* bool result = r == comparand; */
6563 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6564 cmp->sreg1 = ins->dreg;
6565 cmp->sreg2 = args [2]->dreg;
6566 cmp->type = STACK_I4;
6567 MONO_ADD_INS (cfg->cbb, cmp);
6569 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6570 ceq->dreg = alloc_ireg (cfg);
6571 ceq->type = STACK_I4;
6572 MONO_ADD_INS (cfg->cbb, ceq);
6574 /* *success = result; */
6575 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6577 cfg->has_atomic_cas_i4 = TRUE;
6579 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6580 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6584 } else if (cmethod->klass->image == mono_defaults.corlib &&
6585 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6586 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6589 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6591 MonoType *t = fsig->params [0];
6593 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6595 g_assert (t->byref);
6596 /* t is a byref type, so the reference check is more complicated */
6597 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6598 if (t->type == MONO_TYPE_I1)
6599 opcode = OP_ATOMIC_LOAD_I1;
6600 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6601 opcode = OP_ATOMIC_LOAD_U1;
6602 else if (t->type == MONO_TYPE_I2)
6603 opcode = OP_ATOMIC_LOAD_I2;
6604 else if (t->type == MONO_TYPE_U2)
6605 opcode = OP_ATOMIC_LOAD_U2;
6606 else if (t->type == MONO_TYPE_I4)
6607 opcode = OP_ATOMIC_LOAD_I4;
6608 else if (t->type == MONO_TYPE_U4)
6609 opcode = OP_ATOMIC_LOAD_U4;
6610 else if (t->type == MONO_TYPE_R4)
6611 opcode = OP_ATOMIC_LOAD_R4;
6612 else if (t->type == MONO_TYPE_R8)
6613 opcode = OP_ATOMIC_LOAD_R8;
6614 #if SIZEOF_REGISTER == 8
6615 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6616 opcode = OP_ATOMIC_LOAD_I8;
6617 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6618 opcode = OP_ATOMIC_LOAD_U8;
6620 else if (t->type == MONO_TYPE_I)
6621 opcode = OP_ATOMIC_LOAD_I4;
6622 else if (is_ref || t->type == MONO_TYPE_U)
6623 opcode = OP_ATOMIC_LOAD_U4;
6627 if (!mono_arch_opcode_supported (opcode))
6630 MONO_INST_NEW (cfg, ins, opcode);
6631 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6632 ins->sreg1 = args [0]->dreg;
6633 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6634 MONO_ADD_INS (cfg->cbb, ins);
6637 case MONO_TYPE_BOOLEAN:
6644 ins->type = STACK_I4;
6648 ins->type = STACK_I8;
6652 #if SIZEOF_REGISTER == 8
6653 ins->type = STACK_I8;
6655 ins->type = STACK_I4;
6659 ins->type = cfg->r4_stack_type;
6662 ins->type = STACK_R8;
6666 ins->type = STACK_OBJ;
6672 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6674 MonoType *t = fsig->params [0];
6677 g_assert (t->byref);
6678 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6679 if (t->type == MONO_TYPE_I1)
6680 opcode = OP_ATOMIC_STORE_I1;
6681 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6682 opcode = OP_ATOMIC_STORE_U1;
6683 else if (t->type == MONO_TYPE_I2)
6684 opcode = OP_ATOMIC_STORE_I2;
6685 else if (t->type == MONO_TYPE_U2)
6686 opcode = OP_ATOMIC_STORE_U2;
6687 else if (t->type == MONO_TYPE_I4)
6688 opcode = OP_ATOMIC_STORE_I4;
6689 else if (t->type == MONO_TYPE_U4)
6690 opcode = OP_ATOMIC_STORE_U4;
6691 else if (t->type == MONO_TYPE_R4)
6692 opcode = OP_ATOMIC_STORE_R4;
6693 else if (t->type == MONO_TYPE_R8)
6694 opcode = OP_ATOMIC_STORE_R8;
6695 #if SIZEOF_REGISTER == 8
6696 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6697 opcode = OP_ATOMIC_STORE_I8;
6698 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6699 opcode = OP_ATOMIC_STORE_U8;
6701 else if (t->type == MONO_TYPE_I)
6702 opcode = OP_ATOMIC_STORE_I4;
6703 else if (is_ref || t->type == MONO_TYPE_U)
6704 opcode = OP_ATOMIC_STORE_U4;
6708 if (!mono_arch_opcode_supported (opcode))
6711 MONO_INST_NEW (cfg, ins, opcode);
6712 ins->dreg = args [0]->dreg;
6713 ins->sreg1 = args [1]->dreg;
6714 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6715 MONO_ADD_INS (cfg->cbb, ins);
6717 if (cfg->gen_write_barriers && is_ref)
6718 emit_write_barrier (cfg, args [0], args [1]);
6724 } else if (cmethod->klass->image == mono_defaults.corlib &&
6725 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6726 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6727 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6728 if (should_insert_brekpoint (cfg->method)) {
6729 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6731 MONO_INST_NEW (cfg, ins, OP_NOP);
6732 MONO_ADD_INS (cfg->cbb, ins);
6736 } else if (cmethod->klass->image == mono_defaults.corlib &&
6737 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6738 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6739 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6741 EMIT_NEW_ICONST (cfg, ins, 1);
6743 EMIT_NEW_ICONST (cfg, ins, 0);
6746 } else if (cmethod->klass->image == mono_defaults.corlib &&
6747 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6748 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6749 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6750 /* No stack walks are currently available, so implement this as an intrinsic */
6751 MonoInst *assembly_ins;
6753 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6754 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6757 } else if (cmethod->klass->image == mono_defaults.corlib &&
6758 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6759 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6760 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6761 /* No stack walks are currently available, so implement this as an intrinsic */
6762 MonoInst *method_ins;
6763 MonoMethod *declaring = cfg->method;
6765 /* This returns the declaring generic method */
6766 if (declaring->is_inflated)
6767 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6768 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6769 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6770 cfg->no_inline = TRUE;
6771 if (cfg->method != cfg->current_method)
6772 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6775 } else if (cmethod->klass == mono_defaults.math_class) {
6777 * There is general branchless code for Min/Max, but it does not work for
6779 * http://everything2.com/?node_id=1051618
6781 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6782 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6783 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6784 !strcmp (cmethod->klass->name, "Selector")) ||
6785 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6786 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6787 !strcmp (cmethod->klass->name, "Selector"))
6789 if (cfg->backend->have_objc_get_selector &&
6790 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6791 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6792 cfg->compile_aot && !cfg->llvm_only) {
6794 MonoJumpInfoToken *ji;
6799 cfg->exception_message = g_strdup ("GetHandle");
6800 cfg->disable_llvm = TRUE;
6802 if (args [0]->opcode == OP_GOT_ENTRY) {
6803 pi = (MonoInst *)args [0]->inst_p1;
6804 g_assert (pi->opcode == OP_PATCH_INFO);
6805 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6806 ji = (MonoJumpInfoToken *)pi->inst_p0;
6808 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6809 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6812 NULLIFY_INS (args [0]);
6815 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6816 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6817 ins->dreg = mono_alloc_ireg (cfg);
6819 ins->inst_p0 = mono_string_to_utf8 (s);
6820 MONO_ADD_INS (cfg->cbb, ins);
6825 #ifdef MONO_ARCH_SIMD_INTRINSICS
6826 if (cfg->opt & MONO_OPT_SIMD) {
6827 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6833 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6837 if (COMPILE_LLVM (cfg)) {
6838 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6843 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6847 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime-internal methods to an alternate
 * implementation.  The visible case replaces String.InternalAllocateStr with a
 * call to the GC's managed allocator when allocation profiling and shared
 * (domain-neutral) code are both disabled.
 * NOTE(review): several lines of this function are elided in this view
 * (e.g. the fallback when no managed allocator exists) — confirm against the
 * full source before relying on the exact control flow.
 */
6850 inline static MonoInst*
6851 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6852 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6854 if (method->klass == mono_defaults.string_class) {
6855 /* managed string allocation support */
6856 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6857 MonoInst *iargs [2];
6858 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6859 MonoMethod *managed_alloc = NULL;
6861 g_assert (vtable); /* Should not fail since it is System.String */
/* No managed allocator is available when cross-compiling. */
6862 #ifndef MONO_CROSS_COMPILE
6863 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
6867 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6868 iargs [1] = args [0];
6869 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created local variables so the
 * inlined callee's IR can refer to them through cfg->args.  The 'this'
 * argument (when sig->hasthis) takes its type from the stack entry since the
 * signature does not describe it.
 */
6876 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6878 MonoInst *store, *temp;
6881 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6882 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6885 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6886 * would be different than the MonoInst's used to represent arguments, and
6887 * the ldelema implementation can't deal with that.
6888 * Solution: When ldelema is used on an inline argument, create a var for
6889 * it, emit ldelema on that var, and emit the saving code below in
6890 * inline_method () if needed.
6892 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6893 cfg->args [i] = temp;
6894 /* This uses cfg->args [i] which is set by the preceding line */
6895 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6896 store->cil_code = sp [0]->cil_code;
6901 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6902 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6904 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The env var is read once and cached in a function-local static.
 * Returns TRUE when the callee passes the filter (or no filter is set —
 * that branch is elided in this view).
 */
6906 check_inline_called_method_name_limit (MonoMethod *called_method)
6909 static const char *limit = NULL;
6911 if (limit == NULL) {
6912 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6914 if (limit_string != NULL)
6915 limit = limit_string;
6920 if (limit [0] != '\0') {
6921 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are compared. */
6923 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6924 g_free (called_method_name);
6926 //return (strncmp_result <= 0);
6927 return (strncmp_result == 0);
6934 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the CALLER:
 * only allow inlining inside methods whose full name starts with the prefix
 * in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Same caching and prefix-match
 * logic; some branches are elided in this view.
 */
6936 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6939 static const char *limit = NULL;
6941 if (limit == NULL) {
6942 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6943 if (limit_string != NULL) {
6944 limit = limit_string;
6950 if (limit [0] != '\0') {
6951 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6953 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6954 g_free (caller_method_name);
6956 //return (strncmp_result <= 0);
6957 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero value appropriate for
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats
 * (loaded from static storage via OP_R4CONST/OP_R8CONST), and VZERO for
 * value types (including generic instances and gsharedvt type variables).
 */
6965 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6967 static double r8_0 = 0.0;
6968 static float r4_0 = 0.0;
/* Strip enum/custom-modifier wrappers before dispatching on the type code. */
6972 rtype = mini_get_underlying_type (rtype);
6976 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6977 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6978 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6979 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6980 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay in single precision instead of widening to R8. */
6981 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6982 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6983 ins->type = STACK_R4;
6984 ins->inst_p0 = (void*)&r4_0;
6986 MONO_ADD_INS (cfg->cbb, ins);
6987 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6988 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6989 ins->type = STACK_R8;
6990 ins->inst_p0 = (void*)&r8_0;
6992 MONO_ADD_INS (cfg->cbb, ins);
6993 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6994 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6995 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6996 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6997 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (object references, etc.) gets a NULL pointer constant. */
6999 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar (), but emits OP_DUMMY_* opcodes.
 * These keep the IR valid (every vreg has a def) without generating real
 * initialization code; used when locals are not zero-initialized.
 * Falls back to emit_init_rvar () for types with no dummy opcode.
 */
7004 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7008 rtype = mini_get_underlying_type (rtype);
7012 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7013 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7014 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7015 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7016 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7017 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7018 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7019 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7020 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7021 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7022 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7023 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7024 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7025 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7027 emit_init_rvar (cfg, dreg, rtype);
7031 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float the value
 * is materialized in a fresh vreg and then stored to the local so the
 * soft-float decomposition sees a proper store; otherwise the local's dreg is
 * initialized directly (real init when INIT, dummy opcodes otherwise).
 */
7033 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7035 MonoInst *var = cfg->locals [local];
7036 if (COMPILE_SOFT_FLOAT (cfg)) {
7038 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7039 emit_init_rvar (cfg, reg, type);
7040 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7043 emit_init_rvar (cfg, var->dreg, type);
7045 emit_dummy_init_rvar (cfg, var->dreg, type);
7052 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current position.  Saves the parts of CFG
 * that mono_method_to_ir () clobbers (locals, args, bblock maps, generic
 * context, ...), compiles the callee's IL into a fresh start/end bblock pair,
 * restores the caller state, and either splices the new bblocks into the
 * caller (merging them where possible) or discards them when the inline is
 * aborted or too costly.  Returns the inline cost (<= 0 means failure;
 * exact return-value conventions partly elided in this view).
 */
7055 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7056 guchar *ip, guint real_offset, gboolean inline_always)
7058 MonoInst *ins, *rvar = NULL;
7059 MonoMethodHeader *cheader;
7060 MonoBasicBlock *ebblock, *sbblock;
7062 MonoMethod *prev_inlined_method;
7063 MonoInst **prev_locals, **prev_args;
7064 MonoType **prev_arg_types;
7065 guint prev_real_offset;
7066 GHashTable *prev_cbb_hash;
7067 MonoBasicBlock **prev_cil_offset_to_bb;
7068 MonoBasicBlock *prev_cbb;
7069 unsigned char* prev_cil_start;
7070 guint32 prev_cil_offset_to_bb_len;
7071 MonoMethod *prev_current_method;
7072 MonoGenericContext *prev_generic_context;
7073 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7075 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters for debugging inliner issues. */
7077 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7078 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7081 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7082 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7087 fsig = mono_method_signature (cmethod);
7089 if (cfg->verbose_level > 2)
7090 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7092 if (!cmethod->inline_info) {
7093 cfg->stat_inlineable_methods++;
7094 cmethod->inline_info = 1;
7097 /* allocate local variables */
7098 cheader = mono_method_get_header (cmethod);
7100 if (cheader == NULL || mono_loader_get_last_error ()) {
7102 mono_metadata_free_mh (cheader);
7103 if (inline_always && mono_loader_get_last_error ()) {
7104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7105 mono_error_set_from_loader_error (&cfg->error);
7108 mono_loader_clear_error ();
7112 /*Must verify before creating locals as it can cause the JIT to assert.*/
7113 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7114 mono_metadata_free_mh (cheader);
7118 /* allocate space to store the return value */
7119 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7120 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Switch cfg->locals over to the callee's locals for the duration. */
7123 prev_locals = cfg->locals;
7124 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7125 for (i = 0; i < cheader->num_locals; ++i)
7126 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7128 /* allocate start and end blocks */
7129 /* This is needed so if the inline is aborted, we can clean up */
7130 NEW_BBLOCK (cfg, sbblock);
7131 sbblock->real_offset = real_offset;
7133 NEW_BBLOCK (cfg, ebblock);
7134 ebblock->block_num = cfg->num_bblocks++;
7135 ebblock->real_offset = real_offset;
/* Save every piece of per-method compilation state we are about to clobber. */
7137 prev_args = cfg->args;
7138 prev_arg_types = cfg->arg_types;
7139 prev_inlined_method = cfg->inlined_method;
7140 cfg->inlined_method = cmethod;
7141 cfg->ret_var_set = FALSE;
7142 cfg->inline_depth ++;
7143 prev_real_offset = cfg->real_offset;
7144 prev_cbb_hash = cfg->cbb_hash;
7145 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7146 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7147 prev_cil_start = cfg->cil_start;
7148 prev_cbb = cfg->cbb;
7149 prev_current_method = cfg->current_method;
7150 prev_generic_context = cfg->generic_context;
7151 prev_ret_var_set = cfg->ret_var_set;
7152 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method is compiled as a virtual inline site. */
7154 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7157 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7159 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
7161 cfg->inlined_method = prev_inlined_method;
7162 cfg->real_offset = prev_real_offset;
7163 cfg->cbb_hash = prev_cbb_hash;
7164 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7165 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7166 cfg->cil_start = prev_cil_start;
7167 cfg->locals = prev_locals;
7168 cfg->args = prev_args;
7169 cfg->arg_types = prev_arg_types;
7170 cfg->current_method = prev_current_method;
7171 cfg->generic_context = prev_generic_context;
7172 cfg->ret_var_set = prev_ret_var_set;
7173 cfg->disable_inline = prev_disable_inline;
7174 cfg->inline_depth --;
/* Accept when cheap enough, forced, or marked AggressiveInlining. */
7176 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7177 if (cfg->verbose_level > 2)
7178 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7180 cfg->stat_inlined_methods++;
7182 /* always add some code to avoid block split failures */
7183 MONO_INST_NEW (cfg, ins, OP_NOP);
7184 MONO_ADD_INS (prev_cbb, ins);
7186 prev_cbb->next_bb = sbblock;
7187 link_bblock (cfg, prev_cbb, sbblock);
7190 * Get rid of the begin and end bblocks if possible to aid local
7193 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7195 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7196 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7198 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7199 MonoBasicBlock *prev = ebblock->in_bb [0];
7201 if (prev->next_bb == ebblock) {
7202 mono_merge_basic_blocks (cfg, prev, ebblock);
7204 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7205 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7206 cfg->cbb = prev_cbb;
7209 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7214 * It's possible that the rvar is set in some prev bblock, but not in others.
7220 for (i = 0; i < ebblock->in_count; ++i) {
7221 bb = ebblock->in_bb [i];
7223 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7226 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7236 * If the inlined method contains only a throw, then the ret var is not
7237 * set, so set it to a dummy value.
7240 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7242 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7245 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear the failure state and drop the new bblocks. */
7248 if (cfg->verbose_level > 2)
7249 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7250 cfg->exception_type = MONO_EXCEPTION_NONE;
7251 mono_loader_clear_error ();
7253 /* This gets rid of the newly added bblocks */
7254 cfg->cbb = prev_cbb;
7256 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7261 * Some of these comments may well be out-of-date.
7262 * Design decisions: we do a single pass over the IL code (and we do bblock
7263 * splitting/merging in the few cases when it's required: a back jump to an IL
7264 * address that was not already seen as bblock starting point).
7265 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7266 * Complex operations are decomposed in simpler ones right away. We need to let the
7267 * arch-specific code peek and poke inside this process somehow (except when the
7268 * optimizations can take advantage of the full semantic info of coarse opcodes).
7269 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7270 * MonoInst->opcode initially is the IL opcode or some simplification of that
7271 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7272 * opcode with value bigger than OP_LAST.
7273 * At this point the IR can be handed over to an interpreter, a dumb code generator
7274 * or to the optimizing code generator that will translate it to SSA form.
7276 * Profiling directed optimizations.
7277 * We may compile by default with few or no optimizations and instrument the code
7278 * or the user may indicate what methods to optimize the most either in a config file
7279 * or through repeated runs where the compiler applies offline the optimizations to
7280 * each method and then decides if it was worth it.
7283 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7284 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7285 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7286 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7287 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7288 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7289 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7290 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7292 /* offset from br.s -> br like opcodes */
7293 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the CIL address IP belongs to bblock BB, i.e. the
 * cil_offset_to_bb map has no entry there (no bblock boundary) or maps the
 * offset to BB itself.
 */
7296 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7298 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7300 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END, creating a bblock (via GET_BBLOCK) at
 * every branch target and at the instruction following each branch/switch.
 * Also marks bblocks that end in CEE_THROW as out-of-line so they can be
 * placed cold.  Several advance/size computations per operand kind are
 * elided in this view.
 */
7304 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7306 unsigned char *ip = start;
7307 unsigned char *target;
7310 MonoBasicBlock *bblock;
7311 const MonoOpcode *opcode;
7314 cli_addr = ip - start;
7315 i = mono_opcode_value ((const guint8 **)&ip, end);
7318 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding class. */
7319 switch (opcode->argument) {
7320 case MonoInlineNone:
7323 case MonoInlineString:
7324 case MonoInlineType:
7325 case MonoInlineField:
7326 case MonoInlineMethod:
7329 case MonoShortInlineR:
7336 case MonoShortInlineVar:
7337 case MonoShortInlineI:
/* Short (1-byte, signed) branch: target = next-instruction + displacement. */
7340 case MonoShortInlineBrTarget:
7341 target = start + cli_addr + 2 + (signed char)ip [1];
7342 GET_BBLOCK (cfg, bblock, target);
7345 GET_BBLOCK (cfg, bblock, ip);
/* Long (4-byte) branch. */
7347 case MonoInlineBrTarget:
7348 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7349 GET_BBLOCK (cfg, bblock, target);
7352 GET_BBLOCK (cfg, bblock, ip);
/* switch: the fall-through target plus one target per table entry. */
7354 case MonoInlineSwitch: {
7355 guint32 n = read32 (ip + 1);
7358 cli_addr += 5 + 4 * n;
7359 target = start + cli_addr;
7360 GET_BBLOCK (cfg, bblock, target);
7362 for (j = 0; j < n; ++j) {
7363 target = start + cli_addr + (gint32)read32 (ip);
7364 GET_BBLOCK (cfg, bblock, target);
7374 g_assert_not_reached ();
7377 if (i == CEE_THROW) {
7378 unsigned char *bb_start = ip - 1;
7380 /* Find the start of the bblock containing the throw */
7382 while ((bb_start >= start) && !bblock) {
7383 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7387 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types.  For wrapper methods the token indexes the wrapper's
 * own data (then inflated with CONTEXT if present — the guarding condition
 * is elided here); otherwise it is looked up in M's image.  Errors are
 * reported through ERROR.
 */
7397 static inline MonoMethod *
7398 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7402 mono_error_init (error);
7404 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7405 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7407 method = mono_class_inflate_generic_method_checked (method, context, error);
7410 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Wrapper around mini_get_method_allow_open () that additionally rejects
 * methods on open constructed types when not compiling gshared code, since
 * such methods cannot be compiled directly.  Uses cfg->error when CFG is
 * available, otherwise a local MonoError that is cleaned up on failure.
 */
7416 static inline MonoMethod *
7417 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7420 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7422 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7423 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7427 if (!method && !cfg)
7428 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7433 static inline MonoClass*
7434 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7439 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7440 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7442 klass = mono_class_inflate_generic_class (klass, context);
7444 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7445 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7448 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data for wrapper
 * methods, otherwise parsed from the image's metadata; the result is then
 * inflated with CONTEXT (inflation errors are asserted away).
 */
7452 static inline MonoMethodSignature*
7453 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7455 MonoMethodSignature *fsig;
7457 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7458 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7460 fsig = mono_metadata_parse_signature (method->klass->image, token);
7464 fsig = mono_inflate_generic_signature(fsig, context, &error);
7466 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException(Exception) method used by CoreCLR security
 * checks to raise an exception from JITted code.
 */
7472 throw_exception (void)
7474 static MonoMethod *method = NULL;
7477 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7478 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () that throws the
 * pre-constructed exception object EX at runtime.
 */
7485 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7487 MonoMethod *thrower = throw_exception ();
7490 EMIT_NEW_PCONST (cfg, args [0], ex);
7491 mono_emit_method_call (cfg, thrower, args, NULL);
7495 * Return the original method if a wrapper is specified. We can only access
7496 * the custom attributes from the original method.
7499 get_original_method (MonoMethod *method)
7501 if (method->wrapper_type == MONO_WRAPPER_NONE)
7504 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7505 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7508 /* in other cases we need to find the original method */
7509 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: verify CALLER may access FIELD; on denial, emit IR
 * that throws the returned security exception at runtime.
 */
7513 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7515 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7516 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7518 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: verify CALLER may call CALLEE; on denial, emit IR
 * that throws the returned security exception at runtime.
 */
7522 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7524 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7525 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7527 emit_throw_exception (cfg, ex);
7531 * Check that the IL instructions at ip are the array initialization
7532 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the dup/ldtoken/call RuntimeHelpers::InitializeArray pattern
 * emitted by compilers after newarr, validates the field token and element
 * type, and returns a pointer to the static field's RVA data (or, for AOT,
 * the RVA itself) so the array can be initialized with a memcpy.  Several
 * validation branches and the element-size switch are elided in this view.
 */
7535 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7538 * newarr[System.Int32]
7540 * ldtoken field valuetype ...
7541 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7543 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7545 guint32 token = read32 (ip + 7);
7546 guint32 field_token = read32 (ip + 2);
7547 guint32 field_index = field_token & 0xffffff;
7549 const char *data_ptr;
7551 MonoMethod *cmethod;
7552 MonoClass *dummy_class;
7553 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7557 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7561 *out_field_token = field_token;
7563 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must really be corlib's RuntimeHelpers.InitializeArray. */
7566 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7568 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7569 case MONO_TYPE_BOOLEAN:
7573 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7574 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7575 case MONO_TYPE_CHAR:
7592 if (size > mono_type_size (field->type, &dummy_align))
7595 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7596 if (!image_is_dynamic (method->klass->image)) {
7597 field_index = read32 (ip + 2) & 0xffffff;
7598 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7599 data_ptr = mono_image_rva_map (method->klass->image, rva);
7600 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7601 /* for aot code we do the lookup on load */
7602 if (aot && data_ptr)
7603 return (const char *)GUINT_TO_POINTER (rva);
7605 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7607 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD, including a disassembly of the offending instruction (or a note
 * that the body is empty).  The header is queued for deferred freeing.
 */
7615 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7617 char *method_fname = mono_method_full_name (method, TRUE);
7619 MonoMethodHeader *header = mono_method_get_header (method);
7621 if (header->code_size == 0)
7622 method_code = g_strdup ("method body is empty.");
7624 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7625 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7626 g_free (method_fname);
7627 g_free (method_code);
7628 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N.  When the store
 * is a plain reg-reg move and the value was just produced by an
 * ICONST/I8CONST still at the end of the current bblock, retarget that
 * instruction's dreg to the local instead of emitting a move.
 */
7632 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7635 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7636 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7637 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7638 /* Optimize reg-reg moves away */
7640 * Can't optimize other opcodes, since sp[0] might point to
7641 * the last ins of a decomposed opcode.
7643 sp [0]->dreg = (cfg)->locals [n]->dreg;
7645 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7650 * ldloca inhibits many optimizations so try to get rid of it in common
7653 static inline unsigned char *
7654 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	/* NOTE(review): 'size' appears to distinguish the short/long ldloca encodings — confirm at the call sites */
7664 		local = read16 (ip + 2);
	/* If the ldloca is immediately consumed by an INITOBJ in the same bblock,
	 * initialize the local directly and skip materializing its address. */
7668 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7669 		/* From the INITOBJ case */
7670 		token = read32 (ip + 2);
7671 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7672 		CHECK_TYPELOAD (klass);
7673 		type = mini_get_underlying_type (&klass->byval_arg);
7674 		emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual or interface call in llvm-only (bitcode) mode. In this mode
 * vtable/imt slots hold function descriptors (addr + arg pairs) rather than
 * code addresses/trampolines, so every path below loads a descriptor and calls
 * through it. Four strategies are used, in order of preference:
 *   1. plain virtual call through the vtable slot (lazy-initialized via icall);
 *   2. simple interface call through the imt slot's resolver thunk;
 *   3. generic-virtual / variant-interface call with a runtime slow path;
 *   4. fully dynamic resolution via icalls (used for gsharedvt and the rest).
 */
7682 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7684 	MonoInst *icall_args [16];
7685 	MonoInst *call_target, *ins, *vtable_ins;
	/* NOTE(review): arg_reg is shadowed by locals in the optimized branches below; the outer one is only used by the fallback path */
7686 	int arg_reg, this_reg, vtable_reg;
7687 	gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7688 	gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7689 	gboolean variant_iface = FALSE;
7694 	 * In llvm-only mode, vtables contain function descriptors instead of
7695 	 * method addresses/trampolines.
	/* Explicit null check on 'this', since there is no fault path through a trampoline here */
7697 	MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7700 		slot = mono_method_get_imt_slot (cmethod);
7702 		slot = mono_method_get_vtable_index (cmethod);
7704 	this_reg = sp [0]->dreg;
7706 	if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7707 		variant_iface = TRUE;
7709 	if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7711 		 * The simplest case, a normal virtual call.
7713 		int slot_reg = alloc_preg (cfg);
7714 		int addr_reg = alloc_preg (cfg);
7715 		int arg_reg = alloc_preg (cfg);
7716 		MonoBasicBlock *non_null_bb;
7718 		vtable_reg = alloc_preg (cfg);
7719 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7720 		offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7722 		/* Load the vtable slot, which contains a function descriptor. */
7723 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7725 		NEW_BBLOCK (cfg, non_null_bb);
	/* Vtable slots are initialized lazily: a NULL slot takes the icall path below */
7727 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7728 		cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7729 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7732 		// FIXME: Make the wrapper use the preserveall cconv
7733 		// FIXME: Use one icall per slot for small slot numbers ?
7734 		icall_args [0] = vtable_ins;
7735 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7736 		/* Make the icall return the vtable slot value to save some code space */
7737 		ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7738 		ins->dreg = slot_reg;
7739 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7742 		MONO_START_BB (cfg, non_null_bb);
7743 		/* Load the address + arg from the vtable slot */
7744 		EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7745 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7747 		return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7750 	if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7752 		 * A simple interface call
7754 		 * We make a call through an imt slot to obtain the function descriptor we need to call.
7755 		 * The imt slot contains a function descriptor for a runtime function + arg.
7757 		int slot_reg = alloc_preg (cfg);
7758 		int addr_reg = alloc_preg (cfg);
7759 		int arg_reg = alloc_preg (cfg);
7760 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7762 		vtable_reg = alloc_preg (cfg);
7763 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	/* IMT slots live at negative offsets before the vtable */
7764 		offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7767 		 * The slot is already initialized when the vtable is created so there is no need
7771 		/* Load the imt slot, which contains a function descriptor. */
7772 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7774 		/* Load the address + arg of the imt thunk from the imt slot */
7775 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7776 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7778 		 * IMT thunks in llvm-only mode are C functions which take an info argument
7779 		 * plus the imt method and return the ftndesc to call.
7781 		icall_args [0] = thunk_arg_ins;
7782 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7783 												cmethod, MONO_RGCTX_INFO_METHOD);
7784 		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7786 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7789 	if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7791 		 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7792 		 * dynamically extended as more instantiations are discovered.
7793 		 * This handles generic virtual methods both on classes and interfaces.
7795 		int slot_reg = alloc_preg (cfg);
7796 		int addr_reg = alloc_preg (cfg);
7797 		int arg_reg = alloc_preg (cfg);
7798 		int ftndesc_reg = alloc_preg (cfg);
7799 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7800 		MonoBasicBlock *slowpath_bb, *end_bb;
7802 		NEW_BBLOCK (cfg, slowpath_bb);
7803 		NEW_BBLOCK (cfg, end_bb);
7805 		vtable_reg = alloc_preg (cfg);
7806 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	/* Interface methods use imt slots (negative offsets), class methods use vtable slots */
7808 			offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7810 			offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7812 		/* Load the slot, which contains a function descriptor. */
7813 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7815 		/* These slots are not initialized, so fall back to the slow path until they are initialized */
7816 		/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7817 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7818 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7821 		/* Same as with iface calls */
7822 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7823 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7824 		icall_args [0] = thunk_arg_ins;
7825 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7826 												cmethod, MONO_RGCTX_INFO_METHOD);
7827 		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7828 		ftndesc_ins->dreg = ftndesc_reg;
7830 		 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7831 		 * they don't know about yet. Fall back to the slowpath in that case.
7833 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7834 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7836 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	/* Slow path: resolve the target through a runtime icall, which also registers the invocation */
7839 		MONO_START_BB (cfg, slowpath_bb);
7840 		icall_args [0] = vtable_ins;
7841 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7842 		icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7843 												cmethod, MONO_RGCTX_INFO_METHOD);
7845 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7847 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
	/* Both paths converge on ftndesc_reg, so the calli below has a single input */
7848 		ftndesc_ins->dreg = ftndesc_reg;
7849 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7852 		MONO_START_BB (cfg, end_bb);
7853 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7857 	 * Non-optimized cases
7859 	icall_args [0] = sp [0];
7860 	EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7862 	icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7863 											cmethod, MONO_RGCTX_INFO_METHOD);
	/* Out-parameter for the resolver: receives the extra argument to pass to the target */
7865 	arg_reg = alloc_preg (cfg);
7866 	MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7867 	EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7869 	g_assert (is_gsharedvt);
7871 		call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7873 		call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7876 	 * Pass the extra argument even if the callee doesn't receive it, most
7877 	 * calling conventions allow this.
7879 	return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS derives from System.Exception by walking its parent
 * chain. (The surrounding loop structure is not fully visible in this excerpt.)
 */
7883 is_exception_class (MonoClass *klass)
7886 		if (klass == mono_defaults.exception_class)
7888 		klass = klass->parent;
7894  * is_jit_optimizer_disabled:
7896  * Determine whether M's assembly has a DebuggableAttribute with the
7897  * IsJITOptimizerDisabled flag set.
7900 is_jit_optimizer_disabled (MonoMethod *m)
7902 	MonoAssembly *ass = m->klass->image->assembly;
7903 	MonoCustomAttrInfo* attrs;
7906 	gboolean val = FALSE;
	/* The result is cached per-assembly; the inited flag is published with a
	 * memory barrier so lock-free readers see a fully written value. */
7909 	if (ass->jit_optimizer_disabled_inited)
7910 		return ass->jit_optimizer_disabled;
7912 	klass = mono_class_try_get_debuggable_attribute_class ();
	/* No DebuggableAttribute class available: optimizations stay enabled */
7916 		ass->jit_optimizer_disabled = FALSE;
7917 		mono_memory_barrier ();
7918 		ass->jit_optimizer_disabled_inited = TRUE;
7922 	attrs = mono_custom_attrs_from_assembly (ass);
7924 		for (i = 0; i < attrs->num_attrs; ++i) {
7925 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
7927 			MonoMethodSignature *sig;
7929 			if (!attr->ctor || attr->ctor->klass != klass)
7931 			/* Decode the attribute. See reflection.c */
7932 			p = (const char*)attr->data;
	/* 0x0001 is the custom attribute blob prolog */
7933 			g_assert (read16 (p) == 0x0001);
7936 			// FIXME: Support named parameters
7937 			sig = mono_method_signature (attr->ctor);
7938 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7940 			/* Two boolean arguments */
7944 		mono_custom_attrs_free (attrs);
7947 	ass->jit_optimizer_disabled = val;
7948 	mono_memory_barrier ();
7949 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a CALL_OPCODE from METHOD to CMETHOD with signature FSIG
 * can be compiled as a real tail call. Starts from the arch-specific answer
 * and then rules out cases where the callee could observe the caller's stack
 * frame, or where extra caller state (LMF, wrappers) must survive the call.
 */
7955 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7957 	gboolean supported_tail_call;
7960 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7962 	for (i = 0; i < fsig->param_count; ++i) {
7963 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7964 			/* These can point to the current method's stack */
7965 			supported_tail_call = FALSE;
7967 	if (fsig->hasthis && cmethod->klass->valuetype)
7968 		/* this might point to the current method's stack */
7969 		supported_tail_call = FALSE;
7970 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7971 		supported_tail_call = FALSE;
	/* The LMF must stay valid for the duration of the call, which a tail call would destroy */
7972 	if (cfg->method->save_lmf)
7973 		supported_tail_call = FALSE;
7974 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7975 		supported_tail_call = FALSE;
	/* Only plain CEE_CALL is supported, not callvirt/calli */
7976 	if (call_opcode != CEE_CALL)
7977 		supported_tail_call = FALSE;
7979 	/* Debugging support */
7981 	if (supported_tail_call) {
7982 		if (!mono_debug_count ())
7983 			supported_tail_call = FALSE;
7987 	return supported_tail_call;
7993 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor call part of a NEWOBJ opcode. SP points to the
 * arguments (including the newly allocated 'this'). Depending on generic
 * sharing state this emits: an intrinsic, an inlined ctor body, an indirect
 * (calli) call for gsharedvt/shared-generic code, or a normal direct call.
 * INLINE_COSTS is updated when the ctor body is inlined.
 */
7996 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7997 					  MonoInst **sp, guint8 *ip, int *inline_costs)
7999 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
	/* Shared valuetype ctors need an explicit vtable/mrgctx argument */
8001 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8002 			mono_method_is_generic_sharable (cmethod, TRUE)) {
8003 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8004 			mono_class_vtable (cfg->domain, cmethod->klass);
8005 			CHECK_TYPELOAD (cmethod->klass);
8007 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
8008 												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8011 				vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8012 												   cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8014 				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8016 				CHECK_TYPELOAD (cmethod->klass);
8017 				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8022 	/* Avoid virtual calls to ctors if possible */
8023 	if (mono_class_is_marshalbyref (cmethod->klass))
8024 		callvirt_this_arg = sp [0];
8026 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
	/* Intrinsic replaced the whole ctor call; ctors never produce a value */
8027 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8028 		CHECK_CFG_EXCEPTION;
8029 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8030 			   mono_method_check_inlining (cfg, cmethod) &&
8031 			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8034 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8035 			cfg->real_offset += 5;
	/* Credit the 5 units already charged for the call site itself */
8037 			*inline_costs += costs - 5;
8039 			INLINE_FAILURE ("inline failure");
8040 			// FIXME-VT: Clean this up
8041 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8042 				GSHAREDVT_FAILURE(*ip);
8043 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
8045 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
	/* gsharedvt: call through an out-trampoline address obtained from the rgctx */
8048 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8050 		if (cfg->llvm_only) {
8051 			// FIXME: Avoid initializing vtable_arg
8052 			emit_llvmonly_calli (cfg, fsig, sp, addr);
8054 			mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8056 	} else if (context_used &&
8057 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8058 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8059 		MonoInst *cmethod_addr;
8061 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8063 		if (cfg->llvm_only) {
8064 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8065 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8066 			emit_llvmonly_calli (cfg, fsig, sp, addr);
8068 			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8069 												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8071 			mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
	/* Fallback: plain direct call to the ctor */
8074 		INLINE_FAILURE ("ctor call");
8075 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8076 										  callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to make VAL the return value of the current method. Valuetypes
 * returned by reference go through cfg->vret_addr with an indirect STOREV;
 * on soft-float targets an R4 return is first converted via an icall.
 */
8083 emit_setret (MonoCompile *cfg, MonoInst *val)
8085 	MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
	/* CEE_STOBJ means the value cannot be returned in registers */
8088 	if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8091 		if (!cfg->vret_addr) {
8092 			EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
	/* Store through the hidden return-value address argument */
8094 			EMIT_NEW_RETLOADA (cfg, ret_addr);
8096 			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8097 			ins->klass = mono_class_from_mono_type (ret_type);
8100 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8101 		if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8102 			MonoInst *iargs [1];
8106 			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8107 			mono_arch_emit_setret (cfg, cfg->method, conv);
8109 			mono_arch_emit_setret (cfg, cfg->method, val);
8112 		mono_arch_emit_setret (cfg, cfg->method, val);
8118 * mono_method_to_ir:
8120 * Translate the .net IL into linear IR.
8123 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8124 MonoInst *return_var, MonoInst **inline_args,
8125 guint inline_offset, gboolean is_virtual_call)
8128 MonoInst *ins, **sp, **stack_start;
8129 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8130 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8131 MonoMethod *cmethod, *method_definition;
8132 MonoInst **arg_array;
8133 MonoMethodHeader *header;
8135 guint32 token, ins_flag;
8137 MonoClass *constrained_class = NULL;
8138 unsigned char *ip, *end, *target, *err_pos;
8139 MonoMethodSignature *sig;
8140 MonoGenericContext *generic_context = NULL;
8141 MonoGenericContainer *generic_container = NULL;
8142 MonoType **param_types;
8143 int i, n, start_new_bblock, dreg;
8144 int num_calls = 0, inline_costs = 0;
8145 int breakpoint_id = 0;
8147 GSList *class_inits = NULL;
8148 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8150 gboolean init_locals, seq_points, skip_dead_blocks;
8151 gboolean sym_seq_points = FALSE;
8152 MonoDebugMethodInfo *minfo;
8153 MonoBitSet *seq_point_locs = NULL;
8154 MonoBitSet *seq_point_set_locs = NULL;
8156 cfg->disable_inline = is_jit_optimizer_disabled (method);
8158 /* serialization and xdomain stuff may need access to private fields and methods */
8159 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8160 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8161 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8162 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8163 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8164 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8166 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8167 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8168 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8169 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8170 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8172 image = method->klass->image;
8173 header = mono_method_get_header (method);
8175 if (mono_loader_get_last_error ()) {
8176 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8177 mono_error_set_from_loader_error (&cfg->error);
8179 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8181 goto exception_exit;
8183 generic_container = mono_method_get_generic_container (method);
8184 sig = mono_method_signature (method);
8185 num_args = sig->hasthis + sig->param_count;
8186 ip = (unsigned char*)header->code;
8187 cfg->cil_start = ip;
8188 end = ip + header->code_size;
8189 cfg->stat_cil_code_size += header->code_size;
8191 seq_points = cfg->gen_seq_points && cfg->method == method;
8193 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8194 /* We could hit a seq point before attaching to the JIT (#8338) */
8198 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8199 minfo = mono_debug_lookup_method (method);
8201 MonoSymSeqPoint *sps;
8202 int i, n_il_offsets;
8204 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8205 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8206 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8207 sym_seq_points = TRUE;
8208 for (i = 0; i < n_il_offsets; ++i) {
8209 if (sps [i].il_offset < header->code_size)
8210 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8213 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8214 /* Methods without line number info like auto-generated property accessors */
8215 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8216 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8217 sym_seq_points = TRUE;
8222 * Methods without init_locals set could cause asserts in various passes
8223 * (#497220). To work around this, we emit dummy initialization opcodes
8224 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8225 * on some platforms.
8227 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8228 init_locals = header->init_locals;
8232 method_definition = method;
8233 while (method_definition->is_inflated) {
8234 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8235 method_definition = imethod->declaring;
8238 /* SkipVerification is not allowed if core-clr is enabled */
8239 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8241 dont_verify_stloc = TRUE;
8244 if (sig->is_inflated)
8245 generic_context = mono_method_get_context (method);
8246 else if (generic_container)
8247 generic_context = &generic_container->context;
8248 cfg->generic_context = generic_context;
8251 g_assert (!sig->has_type_parameters);
8253 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8254 g_assert (method->is_inflated);
8255 g_assert (mono_method_get_context (method)->method_inst);
8257 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8258 g_assert (sig->generic_param_count);
8260 if (cfg->method == method) {
8261 cfg->real_offset = 0;
8263 cfg->real_offset = inline_offset;
8266 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8267 cfg->cil_offset_to_bb_len = header->code_size;
8269 cfg->current_method = method;
8271 if (cfg->verbose_level > 2)
8272 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8274 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8276 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8277 for (n = 0; n < sig->param_count; ++n)
8278 param_types [n + sig->hasthis] = sig->params [n];
8279 cfg->arg_types = param_types;
8281 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8282 if (cfg->method == method) {
8284 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8285 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8288 NEW_BBLOCK (cfg, start_bblock);
8289 cfg->bb_entry = start_bblock;
8290 start_bblock->cil_code = NULL;
8291 start_bblock->cil_length = 0;
8294 NEW_BBLOCK (cfg, end_bblock);
8295 cfg->bb_exit = end_bblock;
8296 end_bblock->cil_code = NULL;
8297 end_bblock->cil_length = 0;
8298 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8299 g_assert (cfg->num_bblocks == 2);
8301 arg_array = cfg->args;
8303 if (header->num_clauses) {
8304 cfg->spvars = g_hash_table_new (NULL, NULL);
8305 cfg->exvars = g_hash_table_new (NULL, NULL);
8307 /* handle exception clauses */
8308 for (i = 0; i < header->num_clauses; ++i) {
8309 MonoBasicBlock *try_bb;
8310 MonoExceptionClause *clause = &header->clauses [i];
8311 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8313 try_bb->real_offset = clause->try_offset;
8314 try_bb->try_start = TRUE;
8315 try_bb->region = ((i + 1) << 8) | clause->flags;
8316 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8317 tblock->real_offset = clause->handler_offset;
8318 tblock->flags |= BB_EXCEPTION_HANDLER;
8321 * Linking the try block with the EH block hinders inlining as we won't be able to
8322 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8324 if (COMPILE_LLVM (cfg))
8325 link_bblock (cfg, try_bb, tblock);
8327 if (*(ip + clause->handler_offset) == CEE_POP)
8328 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8330 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8331 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8332 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8333 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8334 MONO_ADD_INS (tblock, ins);
8336 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8337 /* finally clauses already have a seq point */
8338 /* seq points for filter clauses are emitted below */
8339 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8340 MONO_ADD_INS (tblock, ins);
8343 /* todo: is a fault block unsafe to optimize? */
8344 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8345 tblock->flags |= BB_EXCEPTION_UNSAFE;
8348 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8350 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8352 /* catch and filter blocks get the exception object on the stack */
8353 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8354 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8356 /* mostly like handle_stack_args (), but just sets the input args */
8357 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8358 tblock->in_scount = 1;
8359 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8360 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8364 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8365 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8366 if (!cfg->compile_llvm) {
8367 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8368 ins->dreg = tblock->in_stack [0]->dreg;
8369 MONO_ADD_INS (tblock, ins);
8372 MonoInst *dummy_use;
8375 * Add a dummy use for the exvar so its liveness info will be
8378 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8381 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8382 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8383 MONO_ADD_INS (tblock, ins);
8386 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8387 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8388 tblock->flags |= BB_EXCEPTION_HANDLER;
8389 tblock->real_offset = clause->data.filter_offset;
8390 tblock->in_scount = 1;
8391 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8392 /* The filter block shares the exvar with the handler block */
8393 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8394 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8395 MONO_ADD_INS (tblock, ins);
8399 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8400 clause->data.catch_class &&
8402 mono_class_check_context_used (clause->data.catch_class)) {
8404 * In shared generic code with catch
8405 * clauses containing type variables
8406 * the exception handling code has to
8407 * be able to get to the rgctx.
8408 * Therefore we have to make sure that
8409 * the vtable/mrgctx argument (for
8410 * static or generic methods) or the
8411 * "this" argument (for non-static
8412 * methods) are live.
8414 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8415 mini_method_get_context (method)->method_inst ||
8416 method->klass->valuetype) {
8417 mono_get_vtable_var (cfg);
8419 MonoInst *dummy_use;
8421 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8426 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8427 cfg->cbb = start_bblock;
8428 cfg->args = arg_array;
8429 mono_save_args (cfg, sig, inline_args);
8432 /* FIRST CODE BLOCK */
8433 NEW_BBLOCK (cfg, tblock);
8434 tblock->cil_code = ip;
8438 ADD_BBLOCK (cfg, tblock);
8440 if (cfg->method == method) {
8441 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8442 if (breakpoint_id) {
8443 MONO_INST_NEW (cfg, ins, OP_BREAK);
8444 MONO_ADD_INS (cfg->cbb, ins);
8448 /* we use a separate basic block for the initialization code */
8449 NEW_BBLOCK (cfg, init_localsbb);
8450 cfg->bb_init = init_localsbb;
8451 init_localsbb->real_offset = cfg->real_offset;
8452 start_bblock->next_bb = init_localsbb;
8453 init_localsbb->next_bb = cfg->cbb;
8454 link_bblock (cfg, start_bblock, init_localsbb);
8455 link_bblock (cfg, init_localsbb, cfg->cbb);
8457 cfg->cbb = init_localsbb;
8459 if (cfg->gsharedvt && cfg->method == method) {
8460 MonoGSharedVtMethodInfo *info;
8461 MonoInst *var, *locals_var;
8464 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8465 info->method = cfg->method;
8466 info->count_entries = 16;
8467 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8468 cfg->gsharedvt_info = info;
8470 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8471 /* prevent it from being register allocated */
8472 //var->flags |= MONO_INST_VOLATILE;
8473 cfg->gsharedvt_info_var = var;
8475 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8476 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8478 /* Allocate locals */
8479 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8480 /* prevent it from being register allocated */
8481 //locals_var->flags |= MONO_INST_VOLATILE;
8482 cfg->gsharedvt_locals_var = locals_var;
8484 dreg = alloc_ireg (cfg);
8485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8487 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8488 ins->dreg = locals_var->dreg;
8490 MONO_ADD_INS (cfg->cbb, ins);
8491 cfg->gsharedvt_locals_var_ins = ins;
8493 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8496 ins->flags |= MONO_INST_INIT;
8500 if (mono_security_core_clr_enabled ()) {
8501 /* check if this is native code, e.g. an icall or a p/invoke */
8502 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8503 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8505 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8506 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8508 		/* if this is a native call then it can only be JITted from platform code */
8509 if ((icall || pinvk) && method->klass && method->klass->image) {
8510 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8511 MonoException *ex = icall ? mono_get_exception_security () :
8512 mono_get_exception_method_access ();
8513 emit_throw_exception (cfg, ex);
8520 CHECK_CFG_EXCEPTION;
8522 if (header->code_size == 0)
8525 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8530 if (cfg->method == method)
8531 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8533 for (n = 0; n < header->num_locals; ++n) {
8534 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8539 /* We force the vtable variable here for all shared methods
8540 for the possibility that they might show up in a stack
8541 trace where their exact instantiation is needed. */
8542 if (cfg->gshared && method == cfg->method) {
8543 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8544 mini_method_get_context (method)->method_inst ||
8545 method->klass->valuetype) {
8546 mono_get_vtable_var (cfg);
8548 /* FIXME: Is there a better way to do this?
8549 We need the variable live for the duration
8550 of the whole method. */
8551 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8555 /* add a check for this != NULL to inlined methods */
8556 if (is_virtual_call) {
8559 NEW_ARGLOAD (cfg, arg_ins, 0);
8560 MONO_ADD_INS (cfg->cbb, arg_ins);
8561 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8564 skip_dead_blocks = !dont_verify;
8565 if (skip_dead_blocks) {
8566 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8571 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8572 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8575 start_new_bblock = 0;
8577 if (cfg->method == method)
8578 cfg->real_offset = ip - header->code;
8580 cfg->real_offset = inline_offset;
8585 if (start_new_bblock) {
8586 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8587 if (start_new_bblock == 2) {
8588 g_assert (ip == tblock->cil_code);
8590 GET_BBLOCK (cfg, tblock, ip);
8592 cfg->cbb->next_bb = tblock;
8594 start_new_bblock = 0;
8595 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8596 if (cfg->verbose_level > 3)
8597 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8598 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8602 g_slist_free (class_inits);
8605 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8606 link_bblock (cfg, cfg->cbb, tblock);
8607 if (sp != stack_start) {
8608 handle_stack_args (cfg, stack_start, sp - stack_start);
8610 CHECK_UNVERIFIABLE (cfg);
8612 cfg->cbb->next_bb = tblock;
8614 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8615 if (cfg->verbose_level > 3)
8616 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8617 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8620 g_slist_free (class_inits);
8625 if (skip_dead_blocks) {
8626 int ip_offset = ip - header->code;
8628 if (ip_offset == bb->end)
8632 int op_size = mono_opcode_size (ip, end);
8633 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8635 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8637 if (ip_offset + op_size == bb->end) {
8638 MONO_INST_NEW (cfg, ins, OP_NOP);
8639 MONO_ADD_INS (cfg->cbb, ins);
8640 start_new_bblock = 1;
8648 * Sequence points are points where the debugger can place a breakpoint.
8649 * Currently, we generate these automatically at points where the IL
8652 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8654 * Make methods interruptible at the beginning, and at the targets of
8655 * backward branches.
8656 * Also, do this at the start of every bblock in methods with clauses too,
8657 * to be able to handle instructions with imprecise control flow like
8659 * Backward branches are handled at the end of method-to-ir ().
8661 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8662 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8664 /* Avoid sequence points on empty IL like .volatile */
8665 // FIXME: Enable this
8666 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8667 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8668 if ((sp != stack_start) && !sym_seq_point)
8669 ins->flags |= MONO_INST_NONEMPTY_STACK;
8670 MONO_ADD_INS (cfg->cbb, ins);
8673 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8676 cfg->cbb->real_offset = cfg->real_offset;
8678 if ((cfg->method == method) && cfg->coverage_info) {
8679 guint32 cil_offset = ip - header->code;
8680 cfg->coverage_info->data [cil_offset].cil_code = ip;
8682 /* TODO: Use an increment here */
8683 #if defined(TARGET_X86)
8684 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8685 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8687 MONO_ADD_INS (cfg->cbb, ins);
8689 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8690 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8694 if (cfg->verbose_level > 3)
8695 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8699 if (seq_points && !sym_seq_points && sp != stack_start) {
8701 * The C# compiler uses these nops to notify the JIT that it should
8702 * insert seq points.
8704 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8705 MONO_ADD_INS (cfg->cbb, ins);
8707 if (cfg->keep_cil_nops)
8708 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8710 MONO_INST_NEW (cfg, ins, OP_NOP);
8712 MONO_ADD_INS (cfg->cbb, ins);
8715 if (should_insert_brekpoint (cfg->method)) {
8716 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8718 MONO_INST_NEW (cfg, ins, OP_NOP);
8721 MONO_ADD_INS (cfg->cbb, ins);
8727 CHECK_STACK_OVF (1);
8728 n = (*ip)-CEE_LDARG_0;
8730 EMIT_NEW_ARGLOAD (cfg, ins, n);
8738 CHECK_STACK_OVF (1);
8739 n = (*ip)-CEE_LDLOC_0;
8741 EMIT_NEW_LOCLOAD (cfg, ins, n);
8750 n = (*ip)-CEE_STLOC_0;
8753 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8755 emit_stloc_ir (cfg, sp, header, n);
8762 CHECK_STACK_OVF (1);
8765 EMIT_NEW_ARGLOAD (cfg, ins, n);
8771 CHECK_STACK_OVF (1);
8774 NEW_ARGLOADA (cfg, ins, n);
8775 MONO_ADD_INS (cfg->cbb, ins);
8785 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8787 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8792 CHECK_STACK_OVF (1);
8795 EMIT_NEW_LOCLOAD (cfg, ins, n);
8799 case CEE_LDLOCA_S: {
8800 unsigned char *tmp_ip;
8802 CHECK_STACK_OVF (1);
8803 CHECK_LOCAL (ip [1]);
8805 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8811 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8820 CHECK_LOCAL (ip [1]);
8821 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8823 emit_stloc_ir (cfg, sp, header, ip [1]);
8828 CHECK_STACK_OVF (1);
8829 EMIT_NEW_PCONST (cfg, ins, NULL);
8830 ins->type = STACK_OBJ;
8835 CHECK_STACK_OVF (1);
8836 EMIT_NEW_ICONST (cfg, ins, -1);
8849 CHECK_STACK_OVF (1);
8850 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8856 CHECK_STACK_OVF (1);
8858 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8864 CHECK_STACK_OVF (1);
8865 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8871 CHECK_STACK_OVF (1);
8872 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8873 ins->type = STACK_I8;
8874 ins->dreg = alloc_dreg (cfg, STACK_I8);
8876 ins->inst_l = (gint64)read64 (ip);
8877 MONO_ADD_INS (cfg->cbb, ins);
8883 gboolean use_aotconst = FALSE;
8885 #ifdef TARGET_POWERPC
8886 /* FIXME: Clean this up */
8887 if (cfg->compile_aot)
8888 use_aotconst = TRUE;
8891 /* FIXME: we should really allocate this only late in the compilation process */
8892 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8894 CHECK_STACK_OVF (1);
8900 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8902 dreg = alloc_freg (cfg);
8903 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8904 ins->type = cfg->r4_stack_type;
8906 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8907 ins->type = cfg->r4_stack_type;
8908 ins->dreg = alloc_dreg (cfg, STACK_R8);
8910 MONO_ADD_INS (cfg->cbb, ins);
8920 gboolean use_aotconst = FALSE;
8922 #ifdef TARGET_POWERPC
8923 /* FIXME: Clean this up */
8924 if (cfg->compile_aot)
8925 use_aotconst = TRUE;
8928 /* FIXME: we should really allocate this only late in the compilation process */
8929 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8931 CHECK_STACK_OVF (1);
8937 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8939 dreg = alloc_freg (cfg);
8940 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8941 ins->type = STACK_R8;
8943 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8944 ins->type = STACK_R8;
8945 ins->dreg = alloc_dreg (cfg, STACK_R8);
8947 MONO_ADD_INS (cfg->cbb, ins);
8956 MonoInst *temp, *store;
8958 CHECK_STACK_OVF (1);
8962 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8963 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8965 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8968 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8981 if (sp [0]->type == STACK_R8)
8982 /* we need to pop the value from the x86 FP stack */
8983 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8988 MonoMethodSignature *fsig;
8991 INLINE_FAILURE ("jmp");
8992 GSHAREDVT_FAILURE (*ip);
8995 if (stack_start != sp)
8997 token = read32 (ip + 1);
8998 /* FIXME: check the signature matches */
8999 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9002 if (cfg->gshared && mono_method_check_context_used (cmethod))
9003 GENERIC_SHARING_FAILURE (CEE_JMP);
9005 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9007 fsig = mono_method_signature (cmethod);
9008 n = fsig->param_count + fsig->hasthis;
9009 if (cfg->llvm_only) {
9012 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9013 for (i = 0; i < n; ++i)
9014 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9015 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9017 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9018 * have to emit a normal return since llvm expects it.
9021 emit_setret (cfg, ins);
9022 MONO_INST_NEW (cfg, ins, OP_BR);
9023 ins->inst_target_bb = end_bblock;
9024 MONO_ADD_INS (cfg->cbb, ins);
9025 link_bblock (cfg, cfg->cbb, end_bblock);
9028 } else if (cfg->backend->have_op_tail_call) {
9029 /* Handle tail calls similarly to calls */
9032 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9033 call->method = cmethod;
9034 call->tail_call = TRUE;
9035 call->signature = mono_method_signature (cmethod);
9036 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9037 call->inst.inst_p0 = cmethod;
9038 for (i = 0; i < n; ++i)
9039 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9041 mono_arch_emit_call (cfg, call);
9042 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9043 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9045 for (i = 0; i < num_args; ++i)
9046 /* Prevent arguments from being optimized away */
9047 arg_array [i]->flags |= MONO_INST_VOLATILE;
9049 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9050 ins = (MonoInst*)call;
9051 ins->inst_p0 = cmethod;
9052 MONO_ADD_INS (cfg->cbb, ins);
9056 start_new_bblock = 1;
9061 MonoMethodSignature *fsig;
9064 token = read32 (ip + 1);
9068 //GSHAREDVT_FAILURE (*ip);
9073 fsig = mini_get_signature (method, token, generic_context);
9075 if (method->dynamic && fsig->pinvoke) {
9079 * This is a call through a function pointer using a pinvoke
9080 * signature. Have to create a wrapper and call that instead.
9081 * FIXME: This is very slow, need to create a wrapper at JIT time
9082 * instead based on the signature.
9084 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9085 EMIT_NEW_PCONST (cfg, args [1], fsig);
9087 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9090 n = fsig->param_count + fsig->hasthis;
9094 //g_assert (!virtual_ || fsig->hasthis);
9098 inline_costs += 10 * num_calls++;
9101 * Making generic calls out of gsharedvt methods.
9102 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9103 * patching gshared method addresses into a gsharedvt method.
9105 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9107 * We pass the address to the gsharedvt trampoline in the rgctx reg
9109 MonoInst *callee = addr;
9111 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9113 GSHAREDVT_FAILURE (*ip);
9117 GSHAREDVT_FAILURE (*ip);
9119 addr = emit_get_rgctx_sig (cfg, context_used,
9120 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9121 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9125 /* Prevent inlining of methods with indirect calls */
9126 INLINE_FAILURE ("indirect call");
9128 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9129 MonoJumpInfoType info_type;
9133 * Instead of emitting an indirect call, emit a direct call
9134 * with the contents of the aotconst as the patch info.
9136 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9137 info_type = (MonoJumpInfoType)addr->inst_c1;
9138 info_data = addr->inst_p0;
9140 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9141 info_data = addr->inst_right->inst_left;
9144 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9145 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9150 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9154 /* End of call, INS should contain the result of the call, if any */
9156 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9158 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9161 CHECK_CFG_EXCEPTION;
9165 constrained_class = NULL;
9169 case CEE_CALLVIRT: {
9170 MonoInst *addr = NULL;
9171 MonoMethodSignature *fsig = NULL;
9173 int virtual_ = *ip == CEE_CALLVIRT;
9174 gboolean pass_imt_from_rgctx = FALSE;
9175 MonoInst *imt_arg = NULL;
9176 MonoInst *keep_this_alive = NULL;
9177 gboolean pass_vtable = FALSE;
9178 gboolean pass_mrgctx = FALSE;
9179 MonoInst *vtable_arg = NULL;
9180 gboolean check_this = FALSE;
9181 gboolean supported_tail_call = FALSE;
9182 gboolean tail_call = FALSE;
9183 gboolean need_seq_point = FALSE;
9184 guint32 call_opcode = *ip;
9185 gboolean emit_widen = TRUE;
9186 gboolean push_res = TRUE;
9187 gboolean skip_ret = FALSE;
9188 gboolean delegate_invoke = FALSE;
9189 gboolean direct_icall = FALSE;
9190 gboolean constrained_partial_call = FALSE;
9191 MonoMethod *cil_method;
9194 token = read32 (ip + 1);
9198 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9201 cil_method = cmethod;
9203 if (constrained_class) {
9204 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9205 if (!mini_is_gsharedvt_klass (constrained_class)) {
9206 g_assert (!cmethod->klass->valuetype);
9207 if (!mini_type_is_reference (&constrained_class->byval_arg))
9208 constrained_partial_call = TRUE;
9212 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9213 if (cfg->verbose_level > 2)
9214 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9215 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9216 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9218 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9222 if (cfg->verbose_level > 2)
9223 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9225 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9227 * This is needed since get_method_constrained can't find
9228 * the method in klass representing a type var.
9229 * The type var is guaranteed to be a reference type in this
9232 if (!mini_is_gsharedvt_klass (constrained_class))
9233 g_assert (!cmethod->klass->valuetype);
9235 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9241 if (!cmethod || mono_loader_get_last_error ()) {
9242 if (mono_loader_get_last_error ()) {
9243 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9244 mono_error_set_from_loader_error (&cfg->error);
9250 if (!dont_verify && !cfg->skip_visibility) {
9251 MonoMethod *target_method = cil_method;
9252 if (method->is_inflated) {
9253 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9256 if (!mono_method_can_access_method (method_definition, target_method) &&
9257 !mono_method_can_access_method (method, cil_method))
9258 METHOD_ACCESS_FAILURE (method, cil_method);
9261 if (mono_security_core_clr_enabled ())
9262 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9264 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9265 /* MS.NET seems to silently convert this to a callvirt */
9270 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9271 * converts to a callvirt.
9273 * tests/bug-515884.il is an example of this behavior
9275 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9276 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9277 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9281 if (!cmethod->klass->inited)
9282 if (!mono_class_init (cmethod->klass))
9283 TYPE_LOAD_ERROR (cmethod->klass);
9285 fsig = mono_method_signature (cmethod);
9288 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9289 mini_class_is_system_array (cmethod->klass)) {
9290 array_rank = cmethod->klass->rank;
9291 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9292 direct_icall = TRUE;
9293 } else if (fsig->pinvoke) {
9294 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9295 fsig = mono_method_signature (wrapper);
9296 } else if (constrained_class) {
9298 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9302 if (cfg->llvm_only && !cfg->method->wrapper_type)
9303 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9305 /* See code below */
9306 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9307 MonoBasicBlock *tbb;
9309 GET_BBLOCK (cfg, tbb, ip + 5);
9310 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9312 * We want to extend the try block to cover the call, but we can't do it if the
9313 * call is made directly since it's followed by an exception check.
9315 direct_icall = FALSE;
9319 mono_save_token_info (cfg, image, token, cil_method);
9321 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9322 need_seq_point = TRUE;
9324 /* Don't support calls made using type arguments for now */
9326 if (cfg->gsharedvt) {
9327 if (mini_is_gsharedvt_signature (fsig))
9328 GSHAREDVT_FAILURE (*ip);
9332 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9333 g_assert_not_reached ();
9335 n = fsig->param_count + fsig->hasthis;
9337 if (!cfg->gshared && cmethod->klass->generic_container)
9341 g_assert (!mono_method_check_context_used (cmethod));
9345 //g_assert (!virtual_ || fsig->hasthis);
9350 * We have the `constrained.' prefix opcode.
9352 if (constrained_class) {
9353 if (mini_is_gsharedvt_klass (constrained_class)) {
9354 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9355 /* The 'Own method' case below */
9356 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9357 /* 'The type parameter is instantiated as a reference type' case below. */
9359 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9360 CHECK_CFG_EXCEPTION;
9366 if (constrained_partial_call) {
9367 gboolean need_box = TRUE;
9370 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9371 * called method is not known at compile time either. The called method could end up being
9372 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9373 * to box the receiver.
9374 * A simple solution would be to box always and make a normal virtual call, but that would
9375 * be bad performance wise.
9377 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9379 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9384 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9385 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9386 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9387 ins->klass = constrained_class;
9388 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9389 CHECK_CFG_EXCEPTION;
9390 } else if (need_box) {
9392 MonoBasicBlock *is_ref_bb, *end_bb;
9393 MonoInst *nonbox_call;
9396 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9398 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9399 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9401 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9403 NEW_BBLOCK (cfg, is_ref_bb);
9404 NEW_BBLOCK (cfg, end_bb);
9406 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9408 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9411 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9413 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9416 MONO_START_BB (cfg, is_ref_bb);
9417 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9418 ins->klass = constrained_class;
9419 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9420 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9424 MONO_START_BB (cfg, end_bb);
9427 nonbox_call->dreg = ins->dreg;
9430 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9431 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9432 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9435 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9437 * The type parameter is instantiated as a valuetype,
9438 * but that type doesn't override the method we're
9439 * calling, so we need to box `this'.
9441 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9442 ins->klass = constrained_class;
9443 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9444 CHECK_CFG_EXCEPTION;
9445 } else if (!constrained_class->valuetype) {
9446 int dreg = alloc_ireg_ref (cfg);
9449 * The type parameter is instantiated as a reference
9450 * type. We have a managed pointer on the stack, so
9451 * we need to dereference it here.
9453 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9454 ins->type = STACK_OBJ;
9457 if (cmethod->klass->valuetype) {
9460 /* Interface method */
9463 mono_class_setup_vtable (constrained_class);
9464 CHECK_TYPELOAD (constrained_class);
9465 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9467 TYPE_LOAD_ERROR (constrained_class);
9468 slot = mono_method_get_vtable_slot (cmethod);
9470 TYPE_LOAD_ERROR (cmethod->klass);
9471 cmethod = constrained_class->vtable [ioffset + slot];
9473 if (cmethod->klass == mono_defaults.enum_class) {
9474 /* Enum implements some interfaces, so treat this as the first case */
9475 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9476 ins->klass = constrained_class;
9477 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9478 CHECK_CFG_EXCEPTION;
9483 constrained_class = NULL;
9486 if (check_call_signature (cfg, fsig, sp))
9489 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9490 delegate_invoke = TRUE;
9492 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9494 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9502 * If the callee is a shared method, then its static cctor
9503 * might not get called after the call was patched.
9505 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9506 emit_class_init (cfg, cmethod->klass);
9507 CHECK_TYPELOAD (cmethod->klass);
9510 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9513 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9515 context_used = mini_method_check_context_used (cfg, cmethod);
9517 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9518 /* Generic method interface
9519 calls are resolved via a
9520 helper function and don't
9522 if (!cmethod_context || !cmethod_context->method_inst)
9523 pass_imt_from_rgctx = TRUE;
9527 * If a shared method calls another
9528 * shared method then the caller must
9529 * have a generic sharing context
9530 * because the magic trampoline
9531 * requires it. FIXME: We shouldn't
9532 * have to force the vtable/mrgctx
9533 * variable here. Instead there
9534 * should be a flag in the cfg to
9535 * request a generic sharing context.
9538 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9539 mono_get_vtable_var (cfg);
9544 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9546 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9548 CHECK_TYPELOAD (cmethod->klass);
9549 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9554 g_assert (!vtable_arg);
9556 if (!cfg->compile_aot) {
9558 * emit_get_rgctx_method () calls mono_class_vtable () so check
9559 * for type load errors before.
9561 mono_class_setup_vtable (cmethod->klass);
9562 CHECK_TYPELOAD (cmethod->klass);
9565 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9567 /* !marshalbyref is needed to properly handle generic methods + remoting */
9568 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9569 MONO_METHOD_IS_FINAL (cmethod)) &&
9570 !mono_class_is_marshalbyref (cmethod->klass)) {
9577 if (pass_imt_from_rgctx) {
9578 g_assert (!pass_vtable);
9580 imt_arg = emit_get_rgctx_method (cfg, context_used,
9581 cmethod, MONO_RGCTX_INFO_METHOD);
9585 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9587 /* Calling virtual generic methods */
9588 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9589 !(MONO_METHOD_IS_FINAL (cmethod) &&
9590 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9591 fsig->generic_param_count &&
9592 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9594 MonoInst *this_temp, *this_arg_temp, *store;
9595 MonoInst *iargs [4];
9597 g_assert (fsig->is_inflated);
9599 /* Prevent inlining of methods that contain indirect calls */
9600 INLINE_FAILURE ("virtual generic call");
9602 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9603 GSHAREDVT_FAILURE (*ip);
9605 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9606 g_assert (!imt_arg);
9608 g_assert (cmethod->is_inflated);
9609 imt_arg = emit_get_rgctx_method (cfg, context_used,
9610 cmethod, MONO_RGCTX_INFO_METHOD);
9611 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9613 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9614 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9615 MONO_ADD_INS (cfg->cbb, store);
9617 /* FIXME: This should be a managed pointer */
9618 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9620 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9621 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9622 cmethod, MONO_RGCTX_INFO_METHOD);
9623 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9624 addr = mono_emit_jit_icall (cfg,
9625 mono_helper_compile_generic_method, iargs);
9627 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9629 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9636 * Implement a workaround for the inherent races involved in locking:
9642 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9643 * try block, the Exit () won't be executed, see:
9644 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9645 * To work around this, we extend such try blocks to include the last x bytes
9646 * of the Monitor.Enter () call.
9648 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9649 MonoBasicBlock *tbb;
9651 GET_BBLOCK (cfg, tbb, ip + 5);
9653 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9654 * from Monitor.Enter like ArgumentNullException.
9656 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9657 /* Mark this bblock as needing to be extended */
9658 tbb->extend_try_block = TRUE;
9662 /* Conversion to a JIT intrinsic */
9663 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9664 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9665 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9672 if ((cfg->opt & MONO_OPT_INLINE) &&
9673 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9674 mono_method_check_inlining (cfg, cmethod)) {
9676 gboolean always = FALSE;
9678 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9679 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9680 /* Prevent inlining of methods that call wrappers */
9681 INLINE_FAILURE ("wrapper call");
9682 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9686 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9688 cfg->real_offset += 5;
9690 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9691 /* *sp is already set by inline_method */
9696 inline_costs += costs;
9702 /* Tail recursion elimination */
9703 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9704 gboolean has_vtargs = FALSE;
9707 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9708 INLINE_FAILURE ("tail call");
9710 /* keep it simple */
9711 for (i = fsig->param_count - 1; i >= 0; i--) {
9712 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9717 for (i = 0; i < n; ++i)
9718 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9719 MONO_INST_NEW (cfg, ins, OP_BR);
9720 MONO_ADD_INS (cfg->cbb, ins);
9721 tblock = start_bblock->out_bb [0];
9722 link_bblock (cfg, cfg->cbb, tblock);
9723 ins->inst_target_bb = tblock;
9724 start_new_bblock = 1;
9726 /* skip the CEE_RET, too */
9727 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9734 inline_costs += 10 * num_calls++;
9737 * Making generic calls out of gsharedvt methods.
9738 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9739 * patching gshared method addresses into a gsharedvt method.
9741 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9742 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9743 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9744 MonoRgctxInfoType info_type;
9747 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9748 //GSHAREDVT_FAILURE (*ip);
9749 // disable for possible remoting calls
9750 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9751 GSHAREDVT_FAILURE (*ip);
9752 if (fsig->generic_param_count) {
9753 /* virtual generic call */
9754 g_assert (!imt_arg);
9755 /* Same as the virtual generic case above */
9756 imt_arg = emit_get_rgctx_method (cfg, context_used,
9757 cmethod, MONO_RGCTX_INFO_METHOD);
9758 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9760 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9761 /* This can happen when we call a fully instantiated iface method */
9762 imt_arg = emit_get_rgctx_method (cfg, context_used,
9763 cmethod, MONO_RGCTX_INFO_METHOD);
9768 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9769 keep_this_alive = sp [0];
9771 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9772 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9774 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9775 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9777 if (cfg->llvm_only) {
9778 // FIXME: Avoid initializing vtable_arg
9779 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9781 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9786 /* Generic sharing */
9789 * Use this if the callee is gsharedvt sharable too, since
9790 * at runtime we might find an instantiation so the call cannot
9791 * be patched (the 'no_patch' code path in mini-trampolines.c).
9793 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9794 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9795 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9796 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9797 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9798 INLINE_FAILURE ("gshared");
9800 g_assert (cfg->gshared && cmethod);
9804 * We are compiling a call to a
9805 * generic method from shared code,
9806 * which means that we have to look up
9807 * the method in the rgctx and do an
9811 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9813 if (cfg->llvm_only) {
9814 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9815 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9817 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9818 // FIXME: Avoid initializing imt_arg/vtable_arg
9819 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9821 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9822 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9827 /* Direct calls to icalls */
9829 MonoMethod *wrapper;
9832 /* Inline the wrapper */
9833 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9835 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9836 g_assert (costs > 0);
9837 cfg->real_offset += 5;
9839 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9840 /* *sp is already set by inline_method */
9845 inline_costs += costs;
9854 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9855 MonoInst *val = sp [fsig->param_count];
9857 if (val->type == STACK_OBJ) {
9858 MonoInst *iargs [2];
9863 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9866 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9867 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9868 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9869 emit_write_barrier (cfg, addr, val);
9870 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9871 GSHAREDVT_FAILURE (*ip);
9872 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9873 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9875 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9876 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9877 if (!cmethod->klass->element_class->valuetype && !readonly)
9878 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9879 CHECK_TYPELOAD (cmethod->klass);
9882 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9885 g_assert_not_reached ();
9892 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9896 /* Tail prefix / tail call optimization */
9898 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9899 /* FIXME: runtime generic context pointer for jumps? */
9900 /* FIXME: handle this for generic sharing eventually */
9901 if ((ins_flag & MONO_INST_TAILCALL) &&
9902 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9903 supported_tail_call = TRUE;
9905 if (supported_tail_call) {
9908 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9909 INLINE_FAILURE ("tail call");
9911 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9913 if (cfg->backend->have_op_tail_call) {
9914 /* Handle tail calls similarly to normal calls */
9917 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9919 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9920 call->tail_call = TRUE;
9921 call->method = cmethod;
9922 call->signature = mono_method_signature (cmethod);
9925 * We implement tail calls by storing the actual arguments into the
9926 * argument variables, then emitting a CEE_JMP.
9928 for (i = 0; i < n; ++i) {
9929 /* Prevent argument from being register allocated */
9930 arg_array [i]->flags |= MONO_INST_VOLATILE;
9931 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9933 ins = (MonoInst*)call;
9934 ins->inst_p0 = cmethod;
9935 ins->inst_p1 = arg_array [0];
9936 MONO_ADD_INS (cfg->cbb, ins);
9937 link_bblock (cfg, cfg->cbb, end_bblock);
9938 start_new_bblock = 1;
9940 // FIXME: Eliminate unreachable epilogs
9943 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9944 * only reachable from this call.
9946 GET_BBLOCK (cfg, tblock, ip + 5);
9947 if (tblock == cfg->cbb || tblock->in_count == 0)
9956 * Synchronized wrappers.
 9957 * It's hard to determine where to replace a method with its synchronized
9958 * wrapper without causing an infinite recursion. The current solution is
9959 * to add the synchronized wrapper in the trampolines, and to
9960 * change the called method to a dummy wrapper, and resolve that wrapper
9961 * to the real method in mono_jit_compile_method ().
9963 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9964 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9965 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9966 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9970 * Virtual calls in llvm-only mode.
9972 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9973 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9978 INLINE_FAILURE ("call");
9979 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9980 imt_arg, vtable_arg);
9982 if (tail_call && !cfg->llvm_only) {
9983 link_bblock (cfg, cfg->cbb, end_bblock);
9984 start_new_bblock = 1;
9986 // FIXME: Eliminate unreachable epilogs
9989 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9990 * only reachable from this call.
9992 GET_BBLOCK (cfg, tblock, ip + 5);
9993 if (tblock == cfg->cbb || tblock->in_count == 0)
10000 /* End of call, INS should contain the result of the call, if any */
10002 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10005 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10010 if (keep_this_alive) {
10011 MonoInst *dummy_use;
10013 /* See mono_emit_method_call_full () */
10014 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10017 CHECK_CFG_EXCEPTION;
10021 g_assert (*ip == CEE_RET);
10025 constrained_class = NULL;
10026 if (need_seq_point)
10027 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10031 if (cfg->method != method) {
10032 /* return from inlined method */
10034 * If in_count == 0, that means the ret is unreachable due to
 10035 * being preceded by a throw. In that case, inline_method () will
10036 * handle setting the return value
10037 * (test case: test_0_inline_throw ()).
10039 if (return_var && cfg->cbb->in_count) {
10040 MonoType *ret_type = mono_method_signature (method)->ret;
10046 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10049 //g_assert (returnvar != -1);
10050 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10051 cfg->ret_var_set = TRUE;
10054 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10056 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10057 emit_pop_lmf (cfg);
10060 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10062 if (seq_points && !sym_seq_points) {
 10064 * Place a seq point here too even though the IL stack is not
10065 * empty, so a step over on
10068 * will work correctly.
10070 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10071 MONO_ADD_INS (cfg->cbb, ins);
10074 g_assert (!return_var);
10078 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10081 emit_setret (cfg, *sp);
10084 if (sp != stack_start)
10086 MONO_INST_NEW (cfg, ins, OP_BR);
10088 ins->inst_target_bb = end_bblock;
10089 MONO_ADD_INS (cfg->cbb, ins);
10090 link_bblock (cfg, cfg->cbb, end_bblock);
10091 start_new_bblock = 1;
10095 MONO_INST_NEW (cfg, ins, OP_BR);
10097 target = ip + 1 + (signed char)(*ip);
10099 GET_BBLOCK (cfg, tblock, target);
10100 link_bblock (cfg, cfg->cbb, tblock);
10101 ins->inst_target_bb = tblock;
10102 if (sp != stack_start) {
10103 handle_stack_args (cfg, stack_start, sp - stack_start);
10105 CHECK_UNVERIFIABLE (cfg);
10107 MONO_ADD_INS (cfg->cbb, ins);
10108 start_new_bblock = 1;
10109 inline_costs += BRANCH_COST;
10123 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10125 target = ip + 1 + *(signed char*)ip;
10128 ADD_BINCOND (NULL);
10131 inline_costs += BRANCH_COST;
10135 MONO_INST_NEW (cfg, ins, OP_BR);
10138 target = ip + 4 + (gint32)read32(ip);
10140 GET_BBLOCK (cfg, tblock, target);
10141 link_bblock (cfg, cfg->cbb, tblock);
10142 ins->inst_target_bb = tblock;
10143 if (sp != stack_start) {
10144 handle_stack_args (cfg, stack_start, sp - stack_start);
10146 CHECK_UNVERIFIABLE (cfg);
10149 MONO_ADD_INS (cfg->cbb, ins);
10151 start_new_bblock = 1;
10152 inline_costs += BRANCH_COST;
10154 case CEE_BRFALSE_S:
10159 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10160 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10161 guint32 opsize = is_short ? 1 : 4;
10163 CHECK_OPSIZE (opsize);
10165 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10168 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10173 GET_BBLOCK (cfg, tblock, target);
10174 link_bblock (cfg, cfg->cbb, tblock);
10175 GET_BBLOCK (cfg, tblock, ip);
10176 link_bblock (cfg, cfg->cbb, tblock);
10178 if (sp != stack_start) {
10179 handle_stack_args (cfg, stack_start, sp - stack_start);
10180 CHECK_UNVERIFIABLE (cfg);
10183 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10184 cmp->sreg1 = sp [0]->dreg;
10185 type_from_op (cfg, cmp, sp [0], NULL);
10188 #if SIZEOF_REGISTER == 4
10189 if (cmp->opcode == OP_LCOMPARE_IMM) {
10190 /* Convert it to OP_LCOMPARE */
10191 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10192 ins->type = STACK_I8;
10193 ins->dreg = alloc_dreg (cfg, STACK_I8);
10195 MONO_ADD_INS (cfg->cbb, ins);
10196 cmp->opcode = OP_LCOMPARE;
10197 cmp->sreg2 = ins->dreg;
10200 MONO_ADD_INS (cfg->cbb, cmp);
10202 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10203 type_from_op (cfg, ins, sp [0], NULL);
10204 MONO_ADD_INS (cfg->cbb, ins);
10205 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10206 GET_BBLOCK (cfg, tblock, target);
10207 ins->inst_true_bb = tblock;
10208 GET_BBLOCK (cfg, tblock, ip);
10209 ins->inst_false_bb = tblock;
10210 start_new_bblock = 2;
10213 inline_costs += BRANCH_COST;
10228 MONO_INST_NEW (cfg, ins, *ip);
10230 target = ip + 4 + (gint32)read32(ip);
10233 ADD_BINCOND (NULL);
10236 inline_costs += BRANCH_COST;
10240 MonoBasicBlock **targets;
10241 MonoBasicBlock *default_bblock;
10242 MonoJumpInfoBBTable *table;
10243 int offset_reg = alloc_preg (cfg);
10244 int target_reg = alloc_preg (cfg);
10245 int table_reg = alloc_preg (cfg);
10246 int sum_reg = alloc_preg (cfg);
10247 gboolean use_op_switch;
10251 n = read32 (ip + 1);
10254 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10258 CHECK_OPSIZE (n * sizeof (guint32));
10259 target = ip + n * sizeof (guint32);
10261 GET_BBLOCK (cfg, default_bblock, target);
10262 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10264 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10265 for (i = 0; i < n; ++i) {
10266 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10267 targets [i] = tblock;
10268 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10272 if (sp != stack_start) {
10274 * Link the current bb with the targets as well, so handle_stack_args
10275 * will set their in_stack correctly.
10277 link_bblock (cfg, cfg->cbb, default_bblock);
10278 for (i = 0; i < n; ++i)
10279 link_bblock (cfg, cfg->cbb, targets [i]);
10281 handle_stack_args (cfg, stack_start, sp - stack_start);
10283 CHECK_UNVERIFIABLE (cfg);
10285 /* Undo the links */
10286 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10287 for (i = 0; i < n; ++i)
10288 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10291 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10292 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10294 for (i = 0; i < n; ++i)
10295 link_bblock (cfg, cfg->cbb, targets [i]);
10297 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10298 table->table = targets;
10299 table->table_size = n;
10301 use_op_switch = FALSE;
10303 /* ARM implements SWITCH statements differently */
10304 /* FIXME: Make it use the generic implementation */
10305 if (!cfg->compile_aot)
10306 use_op_switch = TRUE;
10309 if (COMPILE_LLVM (cfg))
10310 use_op_switch = TRUE;
10312 cfg->cbb->has_jump_table = 1;
10314 if (use_op_switch) {
10315 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10316 ins->sreg1 = src1->dreg;
10317 ins->inst_p0 = table;
10318 ins->inst_many_bb = targets;
10319 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10320 MONO_ADD_INS (cfg->cbb, ins);
10322 if (sizeof (gpointer) == 8)
10323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10325 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10327 #if SIZEOF_REGISTER == 8
10328 /* The upper word might not be zero, and we add it to a 64 bit address later */
10329 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10332 if (cfg->compile_aot) {
10333 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10335 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10336 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10337 ins->inst_p0 = table;
10338 ins->dreg = table_reg;
10339 MONO_ADD_INS (cfg->cbb, ins);
10342 /* FIXME: Use load_memindex */
10343 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10345 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10347 start_new_bblock = 1;
10348 inline_costs += (BRANCH_COST * 2);
10361 case CEE_LDIND_REF:
10368 dreg = alloc_freg (cfg);
10371 dreg = alloc_lreg (cfg);
10373 case CEE_LDIND_REF:
10374 dreg = alloc_ireg_ref (cfg);
10377 dreg = alloc_preg (cfg);
10380 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10381 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10382 if (*ip == CEE_LDIND_R4)
10383 ins->type = cfg->r4_stack_type;
10384 ins->flags |= ins_flag;
10385 MONO_ADD_INS (cfg->cbb, ins);
10387 if (ins_flag & MONO_INST_VOLATILE) {
10388 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10389 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10394 case CEE_STIND_REF:
10405 if (ins_flag & MONO_INST_VOLATILE) {
10406 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10407 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10410 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10411 ins->flags |= ins_flag;
10414 MONO_ADD_INS (cfg->cbb, ins);
10416 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10417 emit_write_barrier (cfg, sp [0], sp [1]);
10426 MONO_INST_NEW (cfg, ins, (*ip));
10428 ins->sreg1 = sp [0]->dreg;
10429 ins->sreg2 = sp [1]->dreg;
10430 type_from_op (cfg, ins, sp [0], sp [1]);
10432 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10434 /* Use the immediate opcodes if possible */
10435 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10436 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10437 if (imm_opcode != -1) {
10438 ins->opcode = imm_opcode;
10439 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10442 NULLIFY_INS (sp [1]);
10446 MONO_ADD_INS ((cfg)->cbb, (ins));
10448 *sp++ = mono_decompose_opcode (cfg, ins);
10465 MONO_INST_NEW (cfg, ins, (*ip));
10467 ins->sreg1 = sp [0]->dreg;
10468 ins->sreg2 = sp [1]->dreg;
10469 type_from_op (cfg, ins, sp [0], sp [1]);
10471 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10472 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10474 /* FIXME: Pass opcode to is_inst_imm */
10476 /* Use the immediate opcodes if possible */
10477 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10478 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10479 if (imm_opcode != -1) {
10480 ins->opcode = imm_opcode;
10481 if (sp [1]->opcode == OP_I8CONST) {
10482 #if SIZEOF_REGISTER == 8
10483 ins->inst_imm = sp [1]->inst_l;
10485 ins->inst_ls_word = sp [1]->inst_ls_word;
10486 ins->inst_ms_word = sp [1]->inst_ms_word;
10490 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10493 /* Might be followed by an instruction added by add_widen_op */
10494 if (sp [1]->next == NULL)
10495 NULLIFY_INS (sp [1]);
10498 MONO_ADD_INS ((cfg)->cbb, (ins));
10500 *sp++ = mono_decompose_opcode (cfg, ins);
10513 case CEE_CONV_OVF_I8:
10514 case CEE_CONV_OVF_U8:
10515 case CEE_CONV_R_UN:
10518 /* Special case this earlier so we have long constants in the IR */
10519 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10520 int data = sp [-1]->inst_c0;
10521 sp [-1]->opcode = OP_I8CONST;
10522 sp [-1]->type = STACK_I8;
10523 #if SIZEOF_REGISTER == 8
10524 if ((*ip) == CEE_CONV_U8)
10525 sp [-1]->inst_c0 = (guint32)data;
10527 sp [-1]->inst_c0 = data;
10529 sp [-1]->inst_ls_word = data;
10530 if ((*ip) == CEE_CONV_U8)
10531 sp [-1]->inst_ms_word = 0;
10533 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10535 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10542 case CEE_CONV_OVF_I4:
10543 case CEE_CONV_OVF_I1:
10544 case CEE_CONV_OVF_I2:
10545 case CEE_CONV_OVF_I:
10546 case CEE_CONV_OVF_U:
10549 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10550 ADD_UNOP (CEE_CONV_OVF_I8);
10557 case CEE_CONV_OVF_U1:
10558 case CEE_CONV_OVF_U2:
10559 case CEE_CONV_OVF_U4:
10562 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10563 ADD_UNOP (CEE_CONV_OVF_U8);
10570 case CEE_CONV_OVF_I1_UN:
10571 case CEE_CONV_OVF_I2_UN:
10572 case CEE_CONV_OVF_I4_UN:
10573 case CEE_CONV_OVF_I8_UN:
10574 case CEE_CONV_OVF_U1_UN:
10575 case CEE_CONV_OVF_U2_UN:
10576 case CEE_CONV_OVF_U4_UN:
10577 case CEE_CONV_OVF_U8_UN:
10578 case CEE_CONV_OVF_I_UN:
10579 case CEE_CONV_OVF_U_UN:
10586 CHECK_CFG_EXCEPTION;
10590 case CEE_ADD_OVF_UN:
10592 case CEE_MUL_OVF_UN:
10594 case CEE_SUB_OVF_UN:
10600 GSHAREDVT_FAILURE (*ip);
10603 token = read32 (ip + 1);
10604 klass = mini_get_class (method, token, generic_context);
10605 CHECK_TYPELOAD (klass);
10607 if (generic_class_is_reference_type (cfg, klass)) {
10608 MonoInst *store, *load;
10609 int dreg = alloc_ireg_ref (cfg);
10611 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10612 load->flags |= ins_flag;
10613 MONO_ADD_INS (cfg->cbb, load);
10615 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10616 store->flags |= ins_flag;
10617 MONO_ADD_INS (cfg->cbb, store);
10619 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10620 emit_write_barrier (cfg, sp [0], sp [1]);
10622 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10628 int loc_index = -1;
10634 token = read32 (ip + 1);
10635 klass = mini_get_class (method, token, generic_context);
10636 CHECK_TYPELOAD (klass);
10638 /* Optimize the common ldobj+stloc combination */
10641 loc_index = ip [6];
10648 loc_index = ip [5] - CEE_STLOC_0;
10655 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10656 CHECK_LOCAL (loc_index);
10658 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10659 ins->dreg = cfg->locals [loc_index]->dreg;
10660 ins->flags |= ins_flag;
10663 if (ins_flag & MONO_INST_VOLATILE) {
10664 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10665 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10671 /* Optimize the ldobj+stobj combination */
10672 /* The reference case ends up being a load+store anyway */
10673 /* Skip this if the operation is volatile. */
10674 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10679 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10686 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10687 ins->flags |= ins_flag;
10690 if (ins_flag & MONO_INST_VOLATILE) {
10691 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10692 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10701 CHECK_STACK_OVF (1);
10703 n = read32 (ip + 1);
10705 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10706 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10707 ins->type = STACK_OBJ;
10710 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10711 MonoInst *iargs [1];
10712 char *str = (char *)mono_method_get_wrapper_data (method, n);
10714 if (cfg->compile_aot)
10715 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10717 EMIT_NEW_PCONST (cfg, iargs [0], str);
10718 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10720 if (cfg->opt & MONO_OPT_SHARED) {
10721 MonoInst *iargs [3];
10723 if (cfg->compile_aot) {
10724 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10726 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10727 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10728 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10729 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10730 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10732 if (cfg->cbb->out_of_line) {
10733 MonoInst *iargs [2];
10735 if (image == mono_defaults.corlib) {
10737 * Avoid relocations in AOT and save some space by using a
10738 * version of helper_ldstr specialized to mscorlib.
10740 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10741 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10743 /* Avoid creating the string object */
10744 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10745 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10746 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10750 if (cfg->compile_aot) {
10751 NEW_LDSTRCONST (cfg, ins, image, n);
10753 MONO_ADD_INS (cfg->cbb, ins);
10756 NEW_PCONST (cfg, ins, NULL);
10757 ins->type = STACK_OBJ;
10758 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10760 OUT_OF_MEMORY_FAILURE;
10763 MONO_ADD_INS (cfg->cbb, ins);
10772 MonoInst *iargs [2];
10773 MonoMethodSignature *fsig;
10776 MonoInst *vtable_arg = NULL;
10779 token = read32 (ip + 1);
10780 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10783 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10786 mono_save_token_info (cfg, image, token, cmethod);
10788 if (!mono_class_init (cmethod->klass))
10789 TYPE_LOAD_ERROR (cmethod->klass);
10791 context_used = mini_method_check_context_used (cfg, cmethod);
10793 if (mono_security_core_clr_enabled ())
10794 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10796 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10797 emit_class_init (cfg, cmethod->klass);
10798 CHECK_TYPELOAD (cmethod->klass);
10802 if (cfg->gsharedvt) {
10803 if (mini_is_gsharedvt_variable_signature (sig))
10804 GSHAREDVT_FAILURE (*ip);
10808 n = fsig->param_count;
10812 * Generate smaller code for the common newobj <exception> instruction in
10813 * argument checking code.
10815 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10816 is_exception_class (cmethod->klass) && n <= 2 &&
10817 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10818 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10819 MonoInst *iargs [3];
10823 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10826 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10829 iargs [1] = sp [0];
10830 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10833 iargs [1] = sp [0];
10834 iargs [2] = sp [1];
10835 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10838 g_assert_not_reached ();
10846 /* move the args to allow room for 'this' in the first position */
10852 /* check_call_signature () requires sp[0] to be set */
10853 this_ins.type = STACK_OBJ;
10854 sp [0] = &this_ins;
10855 if (check_call_signature (cfg, fsig, sp))
10860 if (mini_class_is_system_array (cmethod->klass)) {
10861 *sp = emit_get_rgctx_method (cfg, context_used,
10862 cmethod, MONO_RGCTX_INFO_METHOD);
10864 /* Avoid varargs in the common case */
10865 if (fsig->param_count == 1)
10866 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10867 else if (fsig->param_count == 2)
10868 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10869 else if (fsig->param_count == 3)
10870 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10871 else if (fsig->param_count == 4)
10872 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10874 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10875 } else if (cmethod->string_ctor) {
10876 g_assert (!context_used);
10877 g_assert (!vtable_arg);
10878 /* we simply pass a null pointer */
10879 EMIT_NEW_PCONST (cfg, *sp, NULL);
10880 /* now call the string ctor */
10881 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10883 if (cmethod->klass->valuetype) {
10884 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10885 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10886 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10891 * The code generated by mini_emit_virtual_call () expects
10892 * iargs [0] to be a boxed instance, but luckily the vcall
10893 * will be transformed into a normal call there.
10895 } else if (context_used) {
10896 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10899 MonoVTable *vtable = NULL;
10901 if (!cfg->compile_aot)
10902 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10903 CHECK_TYPELOAD (cmethod->klass);
10906 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10907 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10908 * As a workaround, we call class cctors before allocating objects.
10910 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10911 emit_class_init (cfg, cmethod->klass);
10912 if (cfg->verbose_level > 2)
10913 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10914 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10917 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10920 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10923 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10925 /* Now call the actual ctor */
10926 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10927 CHECK_CFG_EXCEPTION;
10930 if (alloc == NULL) {
10932 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10933 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10941 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10942 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10945 case CEE_CASTCLASS:
10949 token = read32 (ip + 1);
10950 klass = mini_get_class (method, token, generic_context);
10951 CHECK_TYPELOAD (klass);
10952 if (sp [0]->type != STACK_OBJ)
10955 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10956 CHECK_CFG_EXCEPTION;
10965 token = read32 (ip + 1);
10966 klass = mini_get_class (method, token, generic_context);
10967 CHECK_TYPELOAD (klass);
10968 if (sp [0]->type != STACK_OBJ)
10971 context_used = mini_class_check_context_used (cfg, klass);
10973 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10974 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10975 MonoInst *args [3];
10982 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10985 idx = get_castclass_cache_idx (cfg);
10986 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10988 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10991 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10992 MonoMethod *mono_isinst;
10993 MonoInst *iargs [1];
10996 mono_isinst = mono_marshal_get_isinst (klass);
10997 iargs [0] = sp [0];
10999 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
11000 iargs, ip, cfg->real_offset, TRUE);
11001 CHECK_CFG_EXCEPTION;
11002 g_assert (costs > 0);
11005 cfg->real_offset += 5;
11009 inline_costs += costs;
11012 ins = handle_isinst (cfg, klass, *sp, context_used);
11013 CHECK_CFG_EXCEPTION;
11019 case CEE_UNBOX_ANY: {
11020 MonoInst *res, *addr;
11025 token = read32 (ip + 1);
11026 klass = mini_get_class (method, token, generic_context);
11027 CHECK_TYPELOAD (klass);
11029 mono_save_token_info (cfg, image, token, klass);
11031 context_used = mini_class_check_context_used (cfg, klass);
11033 if (mini_is_gsharedvt_klass (klass)) {
11034 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11036 } else if (generic_class_is_reference_type (cfg, klass)) {
11037 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11038 CHECK_CFG_EXCEPTION;
11039 } else if (mono_class_is_nullable (klass)) {
11040 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11042 addr = handle_unbox (cfg, klass, sp, context_used);
11044 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11055 MonoClass *enum_class;
11056 MonoMethod *has_flag;
11062 token = read32 (ip + 1);
11063 klass = mini_get_class (method, token, generic_context);
11064 CHECK_TYPELOAD (klass);
11066 mono_save_token_info (cfg, image, token, klass);
11068 context_used = mini_class_check_context_used (cfg, klass);
11070 if (generic_class_is_reference_type (cfg, klass)) {
11076 if (klass == mono_defaults.void_class)
11078 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11080 /* frequent check in generic code: box (struct), brtrue */
11085 * <push int/long ptr>
11088 * constrained. MyFlags
11089 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11091 * If we find this sequence and the operand types on box and constrained
11092 * are equal, we can emit a specialized instruction sequence instead of
11093 * the very slow HasFlag () call.
11095 if ((cfg->opt & MONO_OPT_INTRINS) &&
11096 /* Cheap checks first. */
11097 ip + 5 + 6 + 5 < end &&
11098 ip [5] == CEE_PREFIX1 &&
11099 ip [6] == CEE_CONSTRAINED_ &&
11100 ip [11] == CEE_CALLVIRT &&
11101 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11102 mono_class_is_enum (klass) &&
11103 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11104 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11105 has_flag->klass == mono_defaults.enum_class &&
11106 !strcmp (has_flag->name, "HasFlag") &&
11107 has_flag->signature->hasthis &&
11108 has_flag->signature->param_count == 1) {
11109 CHECK_TYPELOAD (enum_class);
11111 if (enum_class == klass) {
11112 MonoInst *enum_this, *enum_flag;
11117 enum_this = sp [0];
11118 enum_flag = sp [1];
11120 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11125 // FIXME: LLVM can't handle the inconsistent bb linking
11126 if (!mono_class_is_nullable (klass) &&
11127 !mini_is_gsharedvt_klass (klass) &&
11128 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11129 (ip [5] == CEE_BRTRUE ||
11130 ip [5] == CEE_BRTRUE_S ||
11131 ip [5] == CEE_BRFALSE ||
11132 ip [5] == CEE_BRFALSE_S)) {
11133 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11135 MonoBasicBlock *true_bb, *false_bb;
11139 if (cfg->verbose_level > 3) {
11140 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11141 printf ("<box+brtrue opt>\n");
11146 case CEE_BRFALSE_S:
11149 target = ip + 1 + (signed char)(*ip);
11156 target = ip + 4 + (gint)(read32 (ip));
11160 g_assert_not_reached ();
11164 * We need to link both bblocks, since it is needed for handling stack
11165 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11166 * Branching to only one of them would lead to inconsistencies, so
11167 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11169 GET_BBLOCK (cfg, true_bb, target);
11170 GET_BBLOCK (cfg, false_bb, ip);
11172 mono_link_bblock (cfg, cfg->cbb, true_bb);
11173 mono_link_bblock (cfg, cfg->cbb, false_bb);
11175 if (sp != stack_start) {
11176 handle_stack_args (cfg, stack_start, sp - stack_start);
11178 CHECK_UNVERIFIABLE (cfg);
11181 if (COMPILE_LLVM (cfg)) {
11182 dreg = alloc_ireg (cfg);
11183 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11186 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11188 /* The JIT can't eliminate the iconst+compare */
11189 MONO_INST_NEW (cfg, ins, OP_BR);
11190 ins->inst_target_bb = is_true ? true_bb : false_bb;
11191 MONO_ADD_INS (cfg->cbb, ins);
11194 start_new_bblock = 1;
11198 *sp++ = handle_box (cfg, val, klass, context_used);
11200 CHECK_CFG_EXCEPTION;
11209 token = read32 (ip + 1);
11210 klass = mini_get_class (method, token, generic_context);
11211 CHECK_TYPELOAD (klass);
11213 mono_save_token_info (cfg, image, token, klass);
11215 context_used = mini_class_check_context_used (cfg, klass);
11217 if (mono_class_is_nullable (klass)) {
11220 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11221 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11225 ins = handle_unbox (cfg, klass, sp, context_used);
11238 MonoClassField *field;
11239 #ifndef DISABLE_REMOTING
11243 gboolean is_instance;
11245 gpointer addr = NULL;
11246 gboolean is_special_static;
11248 MonoInst *store_val = NULL;
11249 MonoInst *thread_ins;
11252 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11254 if (op == CEE_STFLD) {
11257 store_val = sp [1];
11262 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11264 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11267 if (op == CEE_STSFLD) {
11270 store_val = sp [0];
11275 token = read32 (ip + 1);
11276 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11277 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11278 klass = field->parent;
11281 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11284 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11285 FIELD_ACCESS_FAILURE (method, field);
11286 mono_class_init (klass);
11289 /* if the class is Critical then transparent code cannot access its fields */
11289 if (!is_instance && mono_security_core_clr_enabled ())
11290 ensure_method_is_allowed_to_access_field (cfg, method, field);
11292 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11293 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11294 if (mono_security_core_clr_enabled ())
11295 ensure_method_is_allowed_to_access_field (cfg, method, field);
11298 ftype = mono_field_get_type (field);
11301 * LDFLD etc. is usable on static fields as well, so convert those cases to
11304 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11316 g_assert_not_reached ();
11318 is_instance = FALSE;
11321 context_used = mini_class_check_context_used (cfg, klass);
11323 /* INSTANCE CASE */
11325 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11326 if (op == CEE_STFLD) {
11327 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11329 #ifndef DISABLE_REMOTING
11330 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11331 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11332 MonoInst *iargs [5];
11334 GSHAREDVT_FAILURE (op);
11336 iargs [0] = sp [0];
11337 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11338 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11339 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11341 iargs [4] = sp [1];
11343 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11344 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11345 iargs, ip, cfg->real_offset, TRUE);
11346 CHECK_CFG_EXCEPTION;
11347 g_assert (costs > 0);
11349 cfg->real_offset += 5;
11351 inline_costs += costs;
11353 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11360 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11362 if (mini_is_gsharedvt_klass (klass)) {
11363 MonoInst *offset_ins;
11365 context_used = mini_class_check_context_used (cfg, klass);
11367 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11368 /* The value is offset by 1 */
11369 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11370 dreg = alloc_ireg_mp (cfg);
11371 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11372 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11373 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11375 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11377 if (sp [0]->opcode != OP_LDADDR)
11378 store->flags |= MONO_INST_FAULT;
11380 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11381 /* insert call to write barrier */
11385 dreg = alloc_ireg_mp (cfg);
11386 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11387 emit_write_barrier (cfg, ptr, sp [1]);
11390 store->flags |= ins_flag;
11397 #ifndef DISABLE_REMOTING
11398 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11399 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11400 MonoInst *iargs [4];
11402 GSHAREDVT_FAILURE (op);
11404 iargs [0] = sp [0];
11405 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11406 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11407 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11408 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11409 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11410 iargs, ip, cfg->real_offset, TRUE);
11411 CHECK_CFG_EXCEPTION;
11412 g_assert (costs > 0);
11414 cfg->real_offset += 5;
11418 inline_costs += costs;
11420 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11426 if (sp [0]->type == STACK_VTYPE) {
11429 /* Have to compute the address of the variable */
11431 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11433 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11435 g_assert (var->klass == klass);
11437 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11441 if (op == CEE_LDFLDA) {
11442 if (sp [0]->type == STACK_OBJ) {
11443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11444 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11447 dreg = alloc_ireg_mp (cfg);
11449 if (mini_is_gsharedvt_klass (klass)) {
11450 MonoInst *offset_ins;
11452 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11453 /* The value is offset by 1 */
11454 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11455 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11457 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11459 ins->klass = mono_class_from_mono_type (field->type);
11460 ins->type = STACK_MP;
11465 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11467 if (mini_is_gsharedvt_klass (klass)) {
11468 MonoInst *offset_ins;
11470 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11471 /* The value is offset by 1 */
11472 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11473 dreg = alloc_ireg_mp (cfg);
11474 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11475 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11479 load->flags |= ins_flag;
11480 if (sp [0]->opcode != OP_LDADDR)
11481 load->flags |= MONO_INST_FAULT;
11493 context_used = mini_class_check_context_used (cfg, klass);
11495 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11498 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11499 * to be called here.
11501 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11502 mono_class_vtable (cfg->domain, klass);
11503 CHECK_TYPELOAD (klass);
11505 mono_domain_lock (cfg->domain);
11506 if (cfg->domain->special_static_fields)
11507 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11508 mono_domain_unlock (cfg->domain);
11510 is_special_static = mono_class_field_is_special_static (field);
11512 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11513 thread_ins = mono_get_thread_intrinsic (cfg);
11517 /* Generate IR to compute the field address */
11518 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11520 * Fast access to TLS data
11521 * Inline version of get_thread_static_data () in
11525 int idx, static_data_reg, array_reg, dreg;
11527 GSHAREDVT_FAILURE (op);
11529 MONO_ADD_INS (cfg->cbb, thread_ins);
11530 static_data_reg = alloc_ireg (cfg);
11531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11533 if (cfg->compile_aot) {
11534 int offset_reg, offset2_reg, idx_reg;
11536 /* For TLS variables, this will return the TLS offset */
11537 EMIT_NEW_SFLDACONST (cfg, ins, field);
11538 offset_reg = ins->dreg;
11539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11540 idx_reg = alloc_ireg (cfg);
11541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11543 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11544 array_reg = alloc_ireg (cfg);
11545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11546 offset2_reg = alloc_ireg (cfg);
11547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11549 dreg = alloc_ireg (cfg);
11550 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11552 offset = (gsize)addr & 0x7fffffff;
11553 idx = offset & 0x3f;
11555 array_reg = alloc_ireg (cfg);
11556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11557 dreg = alloc_ireg (cfg);
11558 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11560 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11561 (cfg->compile_aot && is_special_static) ||
11562 (context_used && is_special_static)) {
11563 MonoInst *iargs [2];
11565 g_assert (field->parent);
11566 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11567 if (context_used) {
11568 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11569 field, MONO_RGCTX_INFO_CLASS_FIELD);
11571 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11573 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11574 } else if (context_used) {
11575 MonoInst *static_data;
11578 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11579 method->klass->name_space, method->klass->name, method->name,
11580 depth, field->offset);
11583 if (mono_class_needs_cctor_run (klass, method))
11584 emit_class_init (cfg, klass);
11587 * The pointer we're computing here is
11589 * super_info.static_data + field->offset
11591 static_data = emit_get_rgctx_klass (cfg, context_used,
11592 klass, MONO_RGCTX_INFO_STATIC_DATA);
11594 if (mini_is_gsharedvt_klass (klass)) {
11595 MonoInst *offset_ins;
11597 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11598 /* The value is offset by 1 */
11599 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11600 dreg = alloc_ireg_mp (cfg);
11601 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11602 } else if (field->offset == 0) {
11605 int addr_reg = mono_alloc_preg (cfg);
11606 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11608 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11609 MonoInst *iargs [2];
11611 g_assert (field->parent);
11612 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11613 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11614 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11616 MonoVTable *vtable = NULL;
11618 if (!cfg->compile_aot)
11619 vtable = mono_class_vtable (cfg->domain, klass);
11620 CHECK_TYPELOAD (klass);
11623 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11624 if (!(g_slist_find (class_inits, klass))) {
11625 emit_class_init (cfg, klass);
11626 if (cfg->verbose_level > 2)
11627 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11628 class_inits = g_slist_prepend (class_inits, klass);
11631 if (cfg->run_cctors) {
11632 /* This makes it so that inlining cannot trigger */
11633 /* .cctors: too many apps depend on them */
11634 /* running with a specific order... */
11636 if (! vtable->initialized)
11637 INLINE_FAILURE ("class init");
11638 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11639 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11640 g_assert_not_reached ();
11641 goto exception_exit;
11645 if (cfg->compile_aot)
11646 EMIT_NEW_SFLDACONST (cfg, ins, field);
11649 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11651 EMIT_NEW_PCONST (cfg, ins, addr);
11654 MonoInst *iargs [1];
11655 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11656 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11660 /* Generate IR to do the actual load/store operation */
11662 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11663 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11664 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11667 if (op == CEE_LDSFLDA) {
11668 ins->klass = mono_class_from_mono_type (ftype);
11669 ins->type = STACK_PTR;
11671 } else if (op == CEE_STSFLD) {
11674 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11675 store->flags |= ins_flag;
11677 gboolean is_const = FALSE;
11678 MonoVTable *vtable = NULL;
11679 gpointer addr = NULL;
11681 if (!context_used) {
11682 vtable = mono_class_vtable (cfg->domain, klass);
11683 CHECK_TYPELOAD (klass);
11685 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11686 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11687 int ro_type = ftype->type;
11689 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11690 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11691 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11694 GSHAREDVT_FAILURE (op);
11696 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11699 case MONO_TYPE_BOOLEAN:
11701 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11705 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11708 case MONO_TYPE_CHAR:
11710 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11714 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11719 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11723 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11728 case MONO_TYPE_PTR:
11729 case MONO_TYPE_FNPTR:
11730 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11731 type_to_eval_stack_type ((cfg), field->type, *sp);
11734 case MONO_TYPE_STRING:
11735 case MONO_TYPE_OBJECT:
11736 case MONO_TYPE_CLASS:
11737 case MONO_TYPE_SZARRAY:
11738 case MONO_TYPE_ARRAY:
11739 if (!mono_gc_is_moving ()) {
11740 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11741 type_to_eval_stack_type ((cfg), field->type, *sp);
11749 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11754 case MONO_TYPE_VALUETYPE:
11764 CHECK_STACK_OVF (1);
11766 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11767 load->flags |= ins_flag;
11773 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11774 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11775 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11786 token = read32 (ip + 1);
11787 klass = mini_get_class (method, token, generic_context);
11788 CHECK_TYPELOAD (klass);
11789 if (ins_flag & MONO_INST_VOLATILE) {
11790 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11791 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11793 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11794 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11795 ins->flags |= ins_flag;
11796 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11797 generic_class_is_reference_type (cfg, klass)) {
11798 /* insert call to write barrier */
11799 emit_write_barrier (cfg, sp [0], sp [1]);
11811 const char *data_ptr;
11813 guint32 field_token;
11819 token = read32 (ip + 1);
11821 klass = mini_get_class (method, token, generic_context);
11822 CHECK_TYPELOAD (klass);
11824 context_used = mini_class_check_context_used (cfg, klass);
11826 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11827 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11828 ins->sreg1 = sp [0]->dreg;
11829 ins->type = STACK_I4;
11830 ins->dreg = alloc_ireg (cfg);
11831 MONO_ADD_INS (cfg->cbb, ins);
11832 *sp = mono_decompose_opcode (cfg, ins);
11835 if (context_used) {
11836 MonoInst *args [3];
11837 MonoClass *array_class = mono_array_class_get (klass, 1);
11838 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11840 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11843 args [0] = emit_get_rgctx_klass (cfg, context_used,
11844 array_class, MONO_RGCTX_INFO_VTABLE);
11849 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11851 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11853 if (cfg->opt & MONO_OPT_SHARED) {
11854 /* Decompose now to avoid problems with references to the domainvar */
11855 MonoInst *iargs [3];
11857 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11858 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11859 iargs [2] = sp [0];
11861 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11863 /* Decompose later since it is needed by abcrem */
11864 MonoClass *array_type = mono_array_class_get (klass, 1);
11865 mono_class_vtable (cfg->domain, array_type);
11866 CHECK_TYPELOAD (array_type);
11868 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11869 ins->dreg = alloc_ireg_ref (cfg);
11870 ins->sreg1 = sp [0]->dreg;
11871 ins->inst_newa_class = klass;
11872 ins->type = STACK_OBJ;
11873 ins->klass = array_type;
11874 MONO_ADD_INS (cfg->cbb, ins);
11875 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11876 cfg->cbb->has_array_access = TRUE;
11878 /* Needed so mono_emit_load_get_addr () gets called */
11879 mono_get_got_var (cfg);
11889 * we inline/optimize the initialization sequence if possible.
11890 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11891 * for small sizes open code the memcpy
11892 * ensure the rva field is big enough
11894 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11895 MonoMethod *memcpy_method = get_memcpy_method ();
11896 MonoInst *iargs [3];
11897 int add_reg = alloc_ireg_mp (cfg);
11899 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11900 if (cfg->compile_aot) {
11901 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11903 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11905 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11906 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11915 if (sp [0]->type != STACK_OBJ)
11918 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11919 ins->dreg = alloc_preg (cfg);
11920 ins->sreg1 = sp [0]->dreg;
11921 ins->type = STACK_I4;
11922 /* This flag will be inherited by the decomposition */
11923 ins->flags |= MONO_INST_FAULT;
11924 MONO_ADD_INS (cfg->cbb, ins);
11925 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11926 cfg->cbb->has_array_access = TRUE;
11934 if (sp [0]->type != STACK_OBJ)
11937 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11939 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11940 CHECK_TYPELOAD (klass);
11941 /* we need to make sure that this array is exactly the type it needs
11942 * to be for correctness. the wrappers are lax with their usage
11943 * so we need to ignore them here
11945 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11946 MonoClass *array_class = mono_array_class_get (klass, 1);
11947 mini_emit_check_array_type (cfg, sp [0], array_class);
11948 CHECK_TYPELOAD (array_class);
11952 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11957 case CEE_LDELEM_I1:
11958 case CEE_LDELEM_U1:
11959 case CEE_LDELEM_I2:
11960 case CEE_LDELEM_U2:
11961 case CEE_LDELEM_I4:
11962 case CEE_LDELEM_U4:
11963 case CEE_LDELEM_I8:
11965 case CEE_LDELEM_R4:
11966 case CEE_LDELEM_R8:
11967 case CEE_LDELEM_REF: {
11973 if (*ip == CEE_LDELEM) {
11975 token = read32 (ip + 1);
11976 klass = mini_get_class (method, token, generic_context);
11977 CHECK_TYPELOAD (klass);
11978 mono_class_init (klass);
11981 klass = array_access_to_klass (*ip);
11983 if (sp [0]->type != STACK_OBJ)
11986 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11988 if (mini_is_gsharedvt_variable_klass (klass)) {
11989 // FIXME-VT: OP_ICONST optimization
11990 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11992 ins->opcode = OP_LOADV_MEMBASE;
11993 } else if (sp [1]->opcode == OP_ICONST) {
11994 int array_reg = sp [0]->dreg;
11995 int index_reg = sp [1]->dreg;
11996 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11998 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11999 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12001 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12004 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12005 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12008 if (*ip == CEE_LDELEM)
12015 case CEE_STELEM_I1:
12016 case CEE_STELEM_I2:
12017 case CEE_STELEM_I4:
12018 case CEE_STELEM_I8:
12019 case CEE_STELEM_R4:
12020 case CEE_STELEM_R8:
12021 case CEE_STELEM_REF:
12026 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12028 if (*ip == CEE_STELEM) {
12030 token = read32 (ip + 1);
12031 klass = mini_get_class (method, token, generic_context);
12032 CHECK_TYPELOAD (klass);
12033 mono_class_init (klass);
12036 klass = array_access_to_klass (*ip);
12038 if (sp [0]->type != STACK_OBJ)
12041 emit_array_store (cfg, klass, sp, TRUE);
12043 if (*ip == CEE_STELEM)
12050 case CEE_CKFINITE: {
12054 if (cfg->llvm_only) {
12055 MonoInst *iargs [1];
12057 iargs [0] = sp [0];
12058 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12060 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12061 ins->sreg1 = sp [0]->dreg;
12062 ins->dreg = alloc_freg (cfg);
12063 ins->type = STACK_R8;
12064 MONO_ADD_INS (cfg->cbb, ins);
12066 *sp++ = mono_decompose_opcode (cfg, ins);
12072 case CEE_REFANYVAL: {
12073 MonoInst *src_var, *src;
12075 int klass_reg = alloc_preg (cfg);
12076 int dreg = alloc_preg (cfg);
12078 GSHAREDVT_FAILURE (*ip);
12081 MONO_INST_NEW (cfg, ins, *ip);
12084 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12085 CHECK_TYPELOAD (klass);
12087 context_used = mini_class_check_context_used (cfg, klass);
12090 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12092 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12093 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12096 if (context_used) {
12097 MonoInst *klass_ins;
12099 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12100 klass, MONO_RGCTX_INFO_KLASS);
12103 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12104 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12106 mini_emit_class_check (cfg, klass_reg, klass);
12108 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12109 ins->type = STACK_MP;
12110 ins->klass = klass;
12115 case CEE_MKREFANY: {
12116 MonoInst *loc, *addr;
12118 GSHAREDVT_FAILURE (*ip);
12121 MONO_INST_NEW (cfg, ins, *ip);
12124 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12125 CHECK_TYPELOAD (klass);
12127 context_used = mini_class_check_context_used (cfg, klass);
12129 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12130 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12132 if (context_used) {
12133 MonoInst *const_ins;
12134 int type_reg = alloc_preg (cfg);
12136 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12140 } else if (cfg->compile_aot) {
12141 int const_reg = alloc_preg (cfg);
12142 int type_reg = alloc_preg (cfg);
12144 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12149 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12150 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12154 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12155 ins->type = STACK_VTYPE;
12156 ins->klass = mono_defaults.typed_reference_class;
12161 case CEE_LDTOKEN: {
12163 MonoClass *handle_class;
12165 CHECK_STACK_OVF (1);
12168 n = read32 (ip + 1);
12170 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12171 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12172 handle = mono_method_get_wrapper_data (method, n);
12173 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12174 if (handle_class == mono_defaults.typehandle_class)
12175 handle = &((MonoClass*)handle)->byval_arg;
12178 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12183 mono_class_init (handle_class);
12184 if (cfg->gshared) {
12185 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12186 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12187 /* This case handles ldtoken
12188 of an open type, like for
12191 } else if (handle_class == mono_defaults.typehandle_class) {
12192 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12193 } else if (handle_class == mono_defaults.fieldhandle_class)
12194 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12195 else if (handle_class == mono_defaults.methodhandle_class)
12196 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12198 g_assert_not_reached ();
12201 if ((cfg->opt & MONO_OPT_SHARED) &&
12202 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12203 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12204 MonoInst *addr, *vtvar, *iargs [3];
12205 int method_context_used;
12207 method_context_used = mini_method_check_context_used (cfg, method);
12209 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12211 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12212 EMIT_NEW_ICONST (cfg, iargs [1], n);
12213 if (method_context_used) {
12214 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12215 method, MONO_RGCTX_INFO_METHOD);
12216 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12218 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12219 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12221 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12223 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12225 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12227 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12228 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12229 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12230 (cmethod->klass == mono_defaults.systemtype_class) &&
12231 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12232 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12234 mono_class_init (tclass);
12235 if (context_used) {
12236 ins = emit_get_rgctx_klass (cfg, context_used,
12237 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12238 } else if (cfg->compile_aot) {
12239 if (method->wrapper_type) {
12240 mono_error_init (&error); //got to do it since there are multiple conditionals below
12241 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12242 /* Special case for static synchronized wrappers */
12243 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12245 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12246 /* FIXME: n is not a normal token */
12248 EMIT_NEW_PCONST (cfg, ins, NULL);
12251 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12254 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12256 EMIT_NEW_PCONST (cfg, ins, rt);
12258 ins->type = STACK_OBJ;
12259 ins->klass = cmethod->klass;
12262 MonoInst *addr, *vtvar;
12264 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12266 if (context_used) {
12267 if (handle_class == mono_defaults.typehandle_class) {
12268 ins = emit_get_rgctx_klass (cfg, context_used,
12269 mono_class_from_mono_type ((MonoType *)handle),
12270 MONO_RGCTX_INFO_TYPE);
12271 } else if (handle_class == mono_defaults.methodhandle_class) {
12272 ins = emit_get_rgctx_method (cfg, context_used,
12273 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12274 } else if (handle_class == mono_defaults.fieldhandle_class) {
12275 ins = emit_get_rgctx_field (cfg, context_used,
12276 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12278 g_assert_not_reached ();
12280 } else if (cfg->compile_aot) {
12281 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12283 EMIT_NEW_PCONST (cfg, ins, handle);
12285 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12286 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12287 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12297 MONO_INST_NEW (cfg, ins, OP_THROW);
12299 ins->sreg1 = sp [0]->dreg;
12301 cfg->cbb->out_of_line = TRUE;
12302 MONO_ADD_INS (cfg->cbb, ins);
12303 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12304 MONO_ADD_INS (cfg->cbb, ins);
12307 link_bblock (cfg, cfg->cbb, end_bblock);
12308 start_new_bblock = 1;
12309 /* This can complicate code generation for llvm since the return value might not be defined */
12310 if (COMPILE_LLVM (cfg))
12311 INLINE_FAILURE ("throw");
12313 case CEE_ENDFINALLY:
12314 /* mono_save_seq_point_info () depends on this */
12315 if (sp != stack_start)
12316 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12317 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12318 MONO_ADD_INS (cfg->cbb, ins);
12320 start_new_bblock = 1;
12323 * Control will leave the method so empty the stack, otherwise
12324 * the next basic block will start with a nonempty stack.
12326 while (sp != stack_start) {
12331 case CEE_LEAVE_S: {
12334 if (*ip == CEE_LEAVE) {
12336 target = ip + 5 + (gint32)read32(ip + 1);
12339 target = ip + 2 + (signed char)(ip [1]);
12342 /* empty the stack */
12343 while (sp != stack_start) {
12348 * If this leave statement is in a catch block, check for a
12349 * pending exception, and rethrow it if necessary.
12350 * We avoid doing this in runtime invoke wrappers, since those are called
12351 * by native code which expects the wrapper to catch all exceptions.
12353 for (i = 0; i < header->num_clauses; ++i) {
12354 MonoExceptionClause *clause = &header->clauses [i];
12357 * Use <= in the final comparison to handle clauses with multiple
12358 * leave statements, like in bug #78024.
12359 * The ordering of the exception clauses guarantees that we find the
12360 * innermost clause.
12362 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12364 MonoBasicBlock *dont_throw;
12369 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12372 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12374 NEW_BBLOCK (cfg, dont_throw);
12377 * Currently, we always rethrow the abort exception, despite the
12378 * fact that this is not correct. See thread6.cs for an example.
12379 * But propagating the abort exception is more important than
12380 * getting the semantics right.
12382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12384 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12386 MONO_START_BB (cfg, dont_throw);
12391 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12394 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12396 MonoExceptionClause *clause;
12398 for (tmp = handlers; tmp; tmp = tmp->next) {
12399 clause = (MonoExceptionClause *)tmp->data;
12400 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12402 link_bblock (cfg, cfg->cbb, tblock);
12403 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12404 ins->inst_target_bb = tblock;
12405 ins->inst_eh_block = clause;
12406 MONO_ADD_INS (cfg->cbb, ins);
12407 cfg->cbb->has_call_handler = 1;
12408 if (COMPILE_LLVM (cfg)) {
12409 MonoBasicBlock *target_bb;
12412 * Link the finally bblock with the target, since it will
12413 * conceptually branch there.
12415 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12416 GET_BBLOCK (cfg, target_bb, target);
12417 link_bblock (cfg, tblock, target_bb);
12420 g_list_free (handlers);
12423 MONO_INST_NEW (cfg, ins, OP_BR);
12424 MONO_ADD_INS (cfg->cbb, ins);
12425 GET_BBLOCK (cfg, tblock, target);
12426 link_bblock (cfg, cfg->cbb, tblock);
12427 ins->inst_target_bb = tblock;
12429 start_new_bblock = 1;
12431 if (*ip == CEE_LEAVE)
12440 * Mono specific opcodes
12442 case MONO_CUSTOM_PREFIX: {
12444 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12448 case CEE_MONO_ICALL: {
12450 MonoJitICallInfo *info;
12452 token = read32 (ip + 2);
12453 func = mono_method_get_wrapper_data (method, token);
12454 info = mono_find_jit_icall_by_addr (func);
12456 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12459 CHECK_STACK (info->sig->param_count);
12460 sp -= info->sig->param_count;
12462 ins = mono_emit_jit_icall (cfg, info->func, sp);
12463 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12467 inline_costs += 10 * num_calls++;
12471 case CEE_MONO_LDPTR_CARD_TABLE:
12472 case CEE_MONO_LDPTR_NURSERY_START:
12473 case CEE_MONO_LDPTR_NURSERY_BITS:
12474 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12475 CHECK_STACK_OVF (1);
12478 case CEE_MONO_LDPTR_CARD_TABLE:
12479 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12481 case CEE_MONO_LDPTR_NURSERY_START:
12482 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12484 case CEE_MONO_LDPTR_NURSERY_BITS:
12485 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12487 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12488 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12494 inline_costs += 10 * num_calls++;
12497 case CEE_MONO_LDPTR: {
12500 CHECK_STACK_OVF (1);
12502 token = read32 (ip + 2);
12504 ptr = mono_method_get_wrapper_data (method, token);
12505 EMIT_NEW_PCONST (cfg, ins, ptr);
12508 inline_costs += 10 * num_calls++;
12509 /* Can't embed random pointers into AOT code */
12513 case CEE_MONO_JIT_ICALL_ADDR: {
12514 MonoJitICallInfo *callinfo;
12517 CHECK_STACK_OVF (1);
12519 token = read32 (ip + 2);
12521 ptr = mono_method_get_wrapper_data (method, token);
12522 callinfo = mono_find_jit_icall_by_addr (ptr);
12523 g_assert (callinfo);
12524 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12527 inline_costs += 10 * num_calls++;
12530 case CEE_MONO_ICALL_ADDR: {
12531 MonoMethod *cmethod;
12534 CHECK_STACK_OVF (1);
12536 token = read32 (ip + 2);
12538 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12540 if (cfg->compile_aot) {
12541 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12543 ptr = mono_lookup_internal_call (cmethod);
12545 EMIT_NEW_PCONST (cfg, ins, ptr);
12551 case CEE_MONO_VTADDR: {
12552 MonoInst *src_var, *src;
12558 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12559 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12564 case CEE_MONO_NEWOBJ: {
12565 MonoInst *iargs [2];
12567 CHECK_STACK_OVF (1);
12569 token = read32 (ip + 2);
12570 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12571 mono_class_init (klass);
12572 NEW_DOMAINCONST (cfg, iargs [0]);
12573 MONO_ADD_INS (cfg->cbb, iargs [0]);
12574 NEW_CLASSCONST (cfg, iargs [1], klass);
12575 MONO_ADD_INS (cfg->cbb, iargs [1]);
12576 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12578 inline_costs += 10 * num_calls++;
12581 case CEE_MONO_OBJADDR:
12584 MONO_INST_NEW (cfg, ins, OP_MOVE);
12585 ins->dreg = alloc_ireg_mp (cfg);
12586 ins->sreg1 = sp [0]->dreg;
12587 ins->type = STACK_MP;
12588 MONO_ADD_INS (cfg->cbb, ins);
12592 case CEE_MONO_LDNATIVEOBJ:
12594 * Similar to LDOBJ, but instead load the unmanaged
12595 * representation of the vtype to the stack.
12600 token = read32 (ip + 2);
12601 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12602 g_assert (klass->valuetype);
12603 mono_class_init (klass);
12606 MonoInst *src, *dest, *temp;
12609 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12610 temp->backend.is_pinvoke = 1;
12611 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12612 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12614 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12615 dest->type = STACK_VTYPE;
12616 dest->klass = klass;
12622 case CEE_MONO_RETOBJ: {
12624 * Same as RET, but return the native representation of a vtype
12627 g_assert (cfg->ret);
12628 g_assert (mono_method_signature (method)->pinvoke);
12633 token = read32 (ip + 2);
12634 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12636 if (!cfg->vret_addr) {
12637 g_assert (cfg->ret_var_is_local);
12639 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12641 EMIT_NEW_RETLOADA (cfg, ins);
12643 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12645 if (sp != stack_start)
12648 MONO_INST_NEW (cfg, ins, OP_BR);
12649 ins->inst_target_bb = end_bblock;
12650 MONO_ADD_INS (cfg->cbb, ins);
12651 link_bblock (cfg, cfg->cbb, end_bblock);
12652 start_new_bblock = 1;
12656 case CEE_MONO_CISINST:
12657 case CEE_MONO_CCASTCLASS: {
12662 token = read32 (ip + 2);
12663 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12664 if (ip [1] == CEE_MONO_CISINST)
12665 ins = handle_cisinst (cfg, klass, sp [0]);
12667 ins = handle_ccastclass (cfg, klass, sp [0]);
12672 case CEE_MONO_SAVE_LMF:
12673 case CEE_MONO_RESTORE_LMF:
12676 case CEE_MONO_CLASSCONST:
12677 CHECK_STACK_OVF (1);
12679 token = read32 (ip + 2);
12680 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12683 inline_costs += 10 * num_calls++;
12685 case CEE_MONO_NOT_TAKEN:
12686 cfg->cbb->out_of_line = TRUE;
12689 case CEE_MONO_TLS: {
12692 CHECK_STACK_OVF (1);
12694 key = (MonoTlsKey)read32 (ip + 2);
12695 g_assert (key < TLS_KEY_NUM);
12697 ins = mono_create_tls_get (cfg, key);
12699 if (cfg->compile_aot) {
12701 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12702 ins->dreg = alloc_preg (cfg);
12703 ins->type = STACK_PTR;
12705 g_assert_not_reached ();
12708 ins->type = STACK_PTR;
12709 MONO_ADD_INS (cfg->cbb, ins);
12714 case CEE_MONO_DYN_CALL: {
12715 MonoCallInst *call;
12717 /* It would be easier to call a trampoline, but that would put an
12718 * extra frame on the stack, confusing exception handling. So
12719 * implement it inline using an opcode for now.
12722 if (!cfg->dyn_call_var) {
12723 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12724 /* prevent it from being register allocated */
12725 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12728 /* Has to use a call inst since the local regalloc expects it */
12729 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12730 ins = (MonoInst*)call;
12732 ins->sreg1 = sp [0]->dreg;
12733 ins->sreg2 = sp [1]->dreg;
12734 MONO_ADD_INS (cfg->cbb, ins);
12736 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12739 inline_costs += 10 * num_calls++;
12743 case CEE_MONO_MEMORY_BARRIER: {
12745 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12749 case CEE_MONO_JIT_ATTACH: {
12750 MonoInst *args [16], *domain_ins;
12751 MonoInst *ad_ins, *jit_tls_ins;
12752 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12754 cfg->attach_cookie = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12755 cfg->attach_dummy = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12757 if (mono_threads_is_coop_enabled ()) {
12758 /* AOT code is only used in the root domain */
12759 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12760 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12761 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12762 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12764 EMIT_NEW_PCONST (cfg, ins, NULL);
12765 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12767 ad_ins = mono_get_domain_intrinsic (cfg);
12768 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12770 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12771 NEW_BBLOCK (cfg, next_bb);
12772 NEW_BBLOCK (cfg, call_bb);
12774 if (cfg->compile_aot) {
12775 /* AOT code is only used in the root domain */
12776 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12778 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12780 MONO_ADD_INS (cfg->cbb, ad_ins);
12781 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12782 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12784 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12786 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12789 MONO_START_BB (cfg, call_bb);
12792 /* AOT code is only used in the root domain */
12793 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12794 EMIT_NEW_PCONST (cfg, args [1], NULL);
12795 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12796 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12799 MONO_START_BB (cfg, next_bb);
12805 case CEE_MONO_JIT_DETACH: {
12806 MonoInst *args [16];
12808 /* Restore the original domain */
12809 dreg = alloc_ireg (cfg);
12810 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->attach_cookie->dreg);
12811 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12812 mono_emit_jit_icall (cfg, mono_jit_thread_detach, args);
12816 case CEE_MONO_CALLI_EXTRA_ARG: {
12818 MonoMethodSignature *fsig;
12822 * This is the same as CEE_CALLI, but passes an additional argument
12823 * to the called method in llvmonly mode.
12824 * This is only used by delegate invoke wrappers to call the
12825 * actual delegate method.
12827 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12830 token = read32 (ip + 2);
12838 fsig = mini_get_signature (method, token, generic_context);
12840 if (cfg->llvm_only)
12841 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12843 n = fsig->param_count + fsig->hasthis + 1;
12850 if (cfg->llvm_only) {
12852 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12853 * cconv. This is set by mono_init_delegate ().
12855 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12856 MonoInst *callee = addr;
12857 MonoInst *call, *localloc_ins;
12858 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12859 int low_bit_reg = alloc_preg (cfg);
12861 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12862 NEW_BBLOCK (cfg, end_bb);
12864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12868 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12869 addr = emit_get_rgctx_sig (cfg, context_used,
12870 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12872 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12874 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12875 ins->dreg = alloc_preg (cfg);
12876 ins->inst_imm = 2 * SIZEOF_VOID_P;
12877 MONO_ADD_INS (cfg->cbb, ins);
12878 localloc_ins = ins;
12879 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12883 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12884 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12886 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12887 MONO_START_BB (cfg, is_gsharedvt_bb);
12888 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12889 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12890 ins->dreg = call->dreg;
12892 MONO_START_BB (cfg, end_bb);
12894 /* Caller uses a normal calling conv */
12896 MonoInst *callee = addr;
12897 MonoInst *call, *localloc_ins;
12898 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12899 int low_bit_reg = alloc_preg (cfg);
12901 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12902 NEW_BBLOCK (cfg, end_bb);
12904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12908 /* Normal case: callee uses a normal cconv, no conversion is needed */
12909 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12911 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12912 MONO_START_BB (cfg, is_gsharedvt_bb);
12913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12914 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12915 MONO_ADD_INS (cfg->cbb, addr);
12917 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12919 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12920 ins->dreg = alloc_preg (cfg);
12921 ins->inst_imm = 2 * SIZEOF_VOID_P;
12922 MONO_ADD_INS (cfg->cbb, ins);
12923 localloc_ins = ins;
12924 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12926 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12928 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12929 ins->dreg = call->dreg;
12930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12932 MONO_START_BB (cfg, end_bb);
12935 /* Same as CEE_CALLI */
12936 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12938 * We pass the address to the gsharedvt trampoline in the rgctx reg
12940 MonoInst *callee = addr;
12942 addr = emit_get_rgctx_sig (cfg, context_used,
12943 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12944 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12946 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12950 if (!MONO_TYPE_IS_VOID (fsig->ret))
12951 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12953 CHECK_CFG_EXCEPTION;
12957 constrained_class = NULL;
12961 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12967 case CEE_PREFIX1: {
12970 case CEE_ARGLIST: {
12971 /* somewhat similar to LDTOKEN */
12972 MonoInst *addr, *vtvar;
12973 CHECK_STACK_OVF (1);
12974 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12976 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12977 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12979 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12980 ins->type = STACK_VTYPE;
12981 ins->klass = mono_defaults.argumenthandle_class;
12991 MonoInst *cmp, *arg1, *arg2;
12999 * The following transforms:
13000 * CEE_CEQ into OP_CEQ
13001 * CEE_CGT into OP_CGT
13002 * CEE_CGT_UN into OP_CGT_UN
13003 * CEE_CLT into OP_CLT
13004 * CEE_CLT_UN into OP_CLT_UN
13006 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13008 MONO_INST_NEW (cfg, ins, cmp->opcode);
13009 cmp->sreg1 = arg1->dreg;
13010 cmp->sreg2 = arg2->dreg;
13011 type_from_op (cfg, cmp, arg1, arg2);
13013 add_widen_op (cfg, cmp, &arg1, &arg2);
13014 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13015 cmp->opcode = OP_LCOMPARE;
13016 else if (arg1->type == STACK_R4)
13017 cmp->opcode = OP_RCOMPARE;
13018 else if (arg1->type == STACK_R8)
13019 cmp->opcode = OP_FCOMPARE;
13021 cmp->opcode = OP_ICOMPARE;
13022 MONO_ADD_INS (cfg->cbb, cmp);
13023 ins->type = STACK_I4;
13024 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13025 type_from_op (cfg, ins, arg1, arg2);
13027 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13029 * The backends expect the fceq opcodes to do the
13032 ins->sreg1 = cmp->sreg1;
13033 ins->sreg2 = cmp->sreg2;
13036 MONO_ADD_INS (cfg->cbb, ins);
13042 MonoInst *argconst;
13043 MonoMethod *cil_method;
13045 CHECK_STACK_OVF (1);
13047 n = read32 (ip + 2);
13048 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13051 mono_class_init (cmethod->klass);
13053 mono_save_token_info (cfg, image, n, cmethod);
13055 context_used = mini_method_check_context_used (cfg, cmethod);
13057 cil_method = cmethod;
13058 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13059 METHOD_ACCESS_FAILURE (method, cil_method);
13061 if (mono_security_core_clr_enabled ())
13062 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13065 * Optimize the common case of ldftn+delegate creation
13067 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13068 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13069 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13070 MonoInst *target_ins, *handle_ins;
13071 MonoMethod *invoke;
13072 int invoke_context_used;
13074 invoke = mono_get_delegate_invoke (ctor_method->klass);
13075 if (!invoke || !mono_method_signature (invoke))
13078 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13080 target_ins = sp [-1];
13082 if (mono_security_core_clr_enabled ())
13083 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13085 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13086 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13087 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13089 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13093 /* FIXME: SGEN support */
13094 if (invoke_context_used == 0 || cfg->llvm_only) {
13096 if (cfg->verbose_level > 3)
13097 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13098 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13101 CHECK_CFG_EXCEPTION;
13111 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13112 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13116 inline_costs += 10 * num_calls++;
13119 case CEE_LDVIRTFTN: {
13120 MonoInst *args [2];
13124 n = read32 (ip + 2);
13125 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13128 mono_class_init (cmethod->klass);
13130 context_used = mini_method_check_context_used (cfg, cmethod);
13132 if (mono_security_core_clr_enabled ())
13133 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13136 * Optimize the common case of ldvirtftn+delegate creation
13138 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13139 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13140 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13141 MonoInst *target_ins, *handle_ins;
13142 MonoMethod *invoke;
13143 int invoke_context_used;
13144 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13146 invoke = mono_get_delegate_invoke (ctor_method->klass);
13147 if (!invoke || !mono_method_signature (invoke))
13150 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13152 target_ins = sp [-1];
13154 if (mono_security_core_clr_enabled ())
13155 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13157 /* FIXME: SGEN support */
13158 if (invoke_context_used == 0 || cfg->llvm_only) {
13160 if (cfg->verbose_level > 3)
13161 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13162 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13165 CHECK_CFG_EXCEPTION;
13178 args [1] = emit_get_rgctx_method (cfg, context_used,
13179 cmethod, MONO_RGCTX_INFO_METHOD);
13182 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13184 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13187 inline_costs += 10 * num_calls++;
13191 CHECK_STACK_OVF (1);
13193 n = read16 (ip + 2);
13195 EMIT_NEW_ARGLOAD (cfg, ins, n);
13200 CHECK_STACK_OVF (1);
13202 n = read16 (ip + 2);
13204 NEW_ARGLOADA (cfg, ins, n);
13205 MONO_ADD_INS (cfg->cbb, ins);
13213 n = read16 (ip + 2);
13215 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13217 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13221 CHECK_STACK_OVF (1);
13223 n = read16 (ip + 2);
13225 EMIT_NEW_LOCLOAD (cfg, ins, n);
13230 unsigned char *tmp_ip;
13231 CHECK_STACK_OVF (1);
13233 n = read16 (ip + 2);
13236 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13242 EMIT_NEW_LOCLOADA (cfg, ins, n);
13251 n = read16 (ip + 2);
13253 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13255 emit_stloc_ir (cfg, sp, header, n);
13262 if (sp != stack_start)
13264 if (cfg->method != method)
13266 * Inlining this into a loop in a parent could lead to
13267 * stack overflows which is different behavior than the
13268 * non-inlined case, thus disable inlining in this case.
13270 INLINE_FAILURE("localloc");
13272 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13273 ins->dreg = alloc_preg (cfg);
13274 ins->sreg1 = sp [0]->dreg;
13275 ins->type = STACK_PTR;
13276 MONO_ADD_INS (cfg->cbb, ins);
13278 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13280 ins->flags |= MONO_INST_INIT;
13285 case CEE_ENDFILTER: {
13286 MonoExceptionClause *clause, *nearest;
13291 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13293 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13294 ins->sreg1 = (*sp)->dreg;
13295 MONO_ADD_INS (cfg->cbb, ins);
13296 start_new_bblock = 1;
13300 for (cc = 0; cc < header->num_clauses; ++cc) {
13301 clause = &header->clauses [cc];
13302 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13303 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13304 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13307 g_assert (nearest);
13308 if ((ip - header->code) != nearest->handler_offset)
13313 case CEE_UNALIGNED_:
13314 ins_flag |= MONO_INST_UNALIGNED;
13315 /* FIXME: record alignment? we can assume 1 for now */
13319 case CEE_VOLATILE_:
13320 ins_flag |= MONO_INST_VOLATILE;
13324 ins_flag |= MONO_INST_TAILCALL;
13325 cfg->flags |= MONO_CFG_HAS_TAIL;
13326 /* Can't inline tail calls at this time */
13327 inline_costs += 100000;
13334 token = read32 (ip + 2);
13335 klass = mini_get_class (method, token, generic_context);
13336 CHECK_TYPELOAD (klass);
13337 if (generic_class_is_reference_type (cfg, klass))
13338 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13340 mini_emit_initobj (cfg, *sp, NULL, klass);
13344 case CEE_CONSTRAINED_:
13346 token = read32 (ip + 2);
13347 constrained_class = mini_get_class (method, token, generic_context);
13348 CHECK_TYPELOAD (constrained_class);
13352 case CEE_INITBLK: {
13353 MonoInst *iargs [3];
13357 /* Skip optimized paths for volatile operations. */
13358 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13359 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13360 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13361 /* emit_memset only works when val == 0 */
13362 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13365 iargs [0] = sp [0];
13366 iargs [1] = sp [1];
13367 iargs [2] = sp [2];
13368 if (ip [1] == CEE_CPBLK) {
13370 * FIXME: It's unclear whether we should be emitting both the acquire
13371 * and release barriers for cpblk. It is technically both a load and
13372 * store operation, so it seems like that's the sensible thing to do.
13374 * FIXME: We emit full barriers on both sides of the operation for
13375 * simplicity. We should have a separate atomic memcpy method instead.
13377 MonoMethod *memcpy_method = get_memcpy_method ();
13379 if (ins_flag & MONO_INST_VOLATILE)
13380 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13382 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13383 call->flags |= ins_flag;
13385 if (ins_flag & MONO_INST_VOLATILE)
13386 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13388 MonoMethod *memset_method = get_memset_method ();
13389 if (ins_flag & MONO_INST_VOLATILE) {
13390 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13391 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13393 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13394 call->flags |= ins_flag;
13405 ins_flag |= MONO_INST_NOTYPECHECK;
13407 ins_flag |= MONO_INST_NORANGECHECK;
13408 /* we ignore the no-nullcheck for now since we
13409 * really do it explicitly only when doing callvirt->call
13413 case CEE_RETHROW: {
13415 int handler_offset = -1;
13417 for (i = 0; i < header->num_clauses; ++i) {
13418 MonoExceptionClause *clause = &header->clauses [i];
13419 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13420 handler_offset = clause->handler_offset;
13425 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13427 if (handler_offset == -1)
13430 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13431 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13432 ins->sreg1 = load->dreg;
13433 MONO_ADD_INS (cfg->cbb, ins);
13435 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13436 MONO_ADD_INS (cfg->cbb, ins);
13439 link_bblock (cfg, cfg->cbb, end_bblock);
13440 start_new_bblock = 1;
13448 CHECK_STACK_OVF (1);
13450 token = read32 (ip + 2);
13451 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13452 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13455 val = mono_type_size (type, &ialign);
13457 MonoClass *klass = mini_get_class (method, token, generic_context);
13458 CHECK_TYPELOAD (klass);
13460 val = mono_type_size (&klass->byval_arg, &ialign);
13462 if (mini_is_gsharedvt_klass (klass))
13463 GSHAREDVT_FAILURE (*ip);
13465 EMIT_NEW_ICONST (cfg, ins, val);
13470 case CEE_REFANYTYPE: {
13471 MonoInst *src_var, *src;
13473 GSHAREDVT_FAILURE (*ip);
13479 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13481 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13482 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13483 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13488 case CEE_READONLY_:
13501 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13511 g_warning ("opcode 0x%02x not handled", *ip);
13515 if (start_new_bblock != 1)
13518 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13519 if (cfg->cbb->next_bb) {
13520 /* This could already be set because of inlining, #693905 */
13521 MonoBasicBlock *bb = cfg->cbb;
13523 while (bb->next_bb)
13525 bb->next_bb = end_bblock;
13527 cfg->cbb->next_bb = end_bblock;
13530 if (cfg->method == method && cfg->domainvar) {
13532 MonoInst *get_domain;
13534 cfg->cbb = init_localsbb;
13536 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13537 MONO_ADD_INS (cfg->cbb, get_domain);
13539 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13541 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13542 MONO_ADD_INS (cfg->cbb, store);
13545 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13546 if (cfg->compile_aot)
13547 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13548 mono_get_got_var (cfg);
13551 if (cfg->method == method && cfg->got_var)
13552 mono_emit_load_got_addr (cfg);
13554 if (init_localsbb) {
13555 cfg->cbb = init_localsbb;
13557 for (i = 0; i < header->num_locals; ++i) {
13558 emit_init_local (cfg, i, header->locals [i], init_locals);
13562 if (cfg->init_ref_vars && cfg->method == method) {
13563 /* Emit initialization for ref vars */
13564 // FIXME: Avoid duplication initialization for IL locals.
13565 for (i = 0; i < cfg->num_varinfo; ++i) {
13566 MonoInst *ins = cfg->varinfo [i];
13568 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13569 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13573 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13574 cfg->cbb = init_localsbb;
13575 emit_push_lmf (cfg);
13578 cfg->cbb = init_localsbb;
13579 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13582 MonoBasicBlock *bb;
13585 * Make seq points at backward branch targets interruptable.
13587 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13588 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13589 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13592 /* Add a sequence point for method entry/exit events */
13593 if (seq_points && cfg->gen_sdb_seq_points) {
13594 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13595 MONO_ADD_INS (init_localsbb, ins);
13596 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13597 MONO_ADD_INS (cfg->bb_exit, ins);
13601 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13602 * the code they refer to was dead (#11880).
13604 if (sym_seq_points) {
13605 for (i = 0; i < header->code_size; ++i) {
13606 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13609 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13610 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13617 if (cfg->method == method) {
13618 MonoBasicBlock *bb;
13619 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13620 bb->region = mono_find_block_region (cfg, bb->real_offset);
13622 mono_create_spvar_for_region (cfg, bb->region);
13623 if (cfg->verbose_level > 2)
13624 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13628 if (inline_costs < 0) {
13631 /* Method is too large */
13632 mname = mono_method_full_name (method, TRUE);
13633 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13637 if ((cfg->verbose_level > 2) && (cfg->method == method))
13638 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13643 g_assert (!mono_error_ok (&cfg->error));
13647 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13651 set_exception_type_from_invalid_il (cfg, method, ip);
13655 g_slist_free (class_inits);
13656 mono_basic_block_free (original_bb);
13657 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13658 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13659 if (cfg->exception_type)
13662 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM counterpart, used
 * when the value being stored is known to be a constant.  Asserts on any
 * opcode without an immediate form.
 * NOTE(review): surrounding lines (return type, braces, switch header,
 * default label) are elided from this excerpt; code kept byte-identical.
 */
13666 store_membase_reg_to_store_membase_imm (int opcode)
13669 case OP_STORE_MEMBASE_REG:
13670 return OP_STORE_MEMBASE_IMM;
13671 case OP_STOREI1_MEMBASE_REG:
13672 return OP_STOREI1_MEMBASE_IMM;
13673 case OP_STOREI2_MEMBASE_REG:
13674 return OP_STOREI2_MEMBASE_IMM;
13675 case OP_STOREI4_MEMBASE_REG:
13676 return OP_STOREI4_MEMBASE_IMM;
13677 case OP_STOREI8_MEMBASE_REG:
13678 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes. */
13680 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking an
 * immediate second operand (OP_IADD -> OP_IADD_IMM etc.), covering integer
 * and long ALU ops, compares, register stores and some target-specific
 * x86/amd64 opcodes.
 * NOTE(review): most 'case OP_*:' labels are elided from this excerpt —
 * only the return statements are visible; code kept byte-identical.
 */
13687 mono_op_to_op_imm (int opcode)
13691 return OP_IADD_IMM;
13693 return OP_ISUB_IMM;
13695 return OP_IDIV_IMM;
13697 return OP_IDIV_UN_IMM;
13699 return OP_IREM_IMM;
13701 return OP_IREM_UN_IMM;
13703 return OP_IMUL_IMM;
13705 return OP_IAND_IMM;
13709 return OP_IXOR_IMM;
13711 return OP_ISHL_IMM;
13713 return OP_ISHR_IMM;
13715 return OP_ISHR_UN_IMM;
/* 64-bit variants of the long ALU ops. */
13718 return OP_LADD_IMM;
13720 return OP_LSUB_IMM;
13722 return OP_LAND_IMM;
13726 return OP_LXOR_IMM;
13728 return OP_LSHL_IMM;
13730 return OP_LSHR_IMM;
13732 return OP_LSHR_UN_IMM;
/* Long remainder only has an immediate form on 64-bit registers. */
13733 #if SIZEOF_REGISTER == 8
13735 return OP_LREM_IMM;
13739 return OP_COMPARE_IMM;
13741 return OP_ICOMPARE_IMM;
13743 return OP_LCOMPARE_IMM;
13745 case OP_STORE_MEMBASE_REG:
13746 return OP_STORE_MEMBASE_IMM;
13747 case OP_STOREI1_MEMBASE_REG:
13748 return OP_STOREI1_MEMBASE_IMM;
13749 case OP_STOREI2_MEMBASE_REG:
13750 return OP_STOREI2_MEMBASE_IMM;
13751 case OP_STOREI4_MEMBASE_REG:
13752 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific immediate forms. */
13754 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13756 return OP_X86_PUSH_IMM;
13757 case OP_X86_COMPARE_MEMBASE_REG:
13758 return OP_X86_COMPARE_MEMBASE_IMM;
13760 #if defined(TARGET_AMD64)
13761 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13762 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a constant becomes a direct call. */
13764 case OP_VOIDCALL_REG:
13765 return OP_VOIDCALL;
13773 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the corresponding OP_LOAD*_MEMBASE IR
 * load opcode.  CEE_LDIND_REF (and, presumably, the elided pointer-sized
 * case above it — verify) map to the pointer-sized OP_LOAD_MEMBASE.
 * Asserts on any other opcode.
 * NOTE(review): most 'case CEE_LDIND_*:' labels are elided from this
 * excerpt; code kept byte-identical.
 */
13780 ldind_to_load_membase (int opcode)
13784 return OP_LOADI1_MEMBASE;
13786 return OP_LOADU1_MEMBASE;
13788 return OP_LOADI2_MEMBASE;
13790 return OP_LOADU2_MEMBASE;
13792 return OP_LOADI4_MEMBASE;
13794 return OP_LOADU4_MEMBASE;
13796 return OP_LOAD_MEMBASE;
13797 case CEE_LDIND_REF:
13798 return OP_LOAD_MEMBASE;
13800 return OP_LOADI8_MEMBASE;
13802 return OP_LOADR4_MEMBASE;
13804 return OP_LOADR8_MEMBASE;
13806 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the corresponding OP_STORE*_MEMBASE_REG
 * IR store opcode; CEE_STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on any other opcode.
 * NOTE(review): most 'case CEE_STIND_*:' labels are elided from this
 * excerpt; code kept byte-identical.
 */
13813 stind_to_store_membase (int opcode)
13817 return OP_STOREI1_MEMBASE_REG;
13819 return OP_STOREI2_MEMBASE_REG;
13821 return OP_STOREI4_MEMBASE_REG;
13823 case CEE_STIND_REF:
13824 return OP_STORE_MEMBASE_REG;
13826 return OP_STOREI8_MEMBASE_REG;
13828 return OP_STORER4_MEMBASE_REG;
13830 return OP_STORER8_MEMBASE_REG;
13832 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to the
 * OP_LOAD*_MEM opcode that loads from an absolute address, for targets that
 * support it (currently gated on x86/amd64; 64-bit loads only when
 * SIZEOF_REGISTER == 8).
 */
13839 mono_load_membase_to_load_mem (int opcode)
13841 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13842 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13844 case OP_LOAD_MEMBASE:
13845 return OP_LOAD_MEM;
13846 case OP_LOADU1_MEMBASE:
13847 return OP_LOADU1_MEM;
13848 case OP_LOADU2_MEMBASE:
13849 return OP_LOADU2_MEM;
13850 case OP_LOADI4_MEMBASE:
13851 return OP_LOADI4_MEM;
13852 case OP_LOADU4_MEMBASE:
13853 return OP_LOADU4_MEM;
13854 #if SIZEOF_REGISTER == 8
13855 case OP_LOADI8_MEMBASE:
13856 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination vreg is about to be spilled with
 * STORE_OPCODE, return an x86/amd64 read-modify-write opcode that operates
 * directly on the memory slot, so the separate load+op+store can be fused.
 * Only pointer-sized / 32-bit (and on amd64 also 64-bit) stores qualify;
 * the elided fall-through presumably returns a "no mapping" sentinel —
 * verify against the full source.
 * NOTE(review): 'case OP_*:' labels and the early-return bodies are elided
 * from this excerpt; code kept byte-identical.
 */
13865 op_to_op_dest_membase (int store_opcode, int opcode)
13867 #if defined(TARGET_X86)
/* Only fuse into pointer-sized or 32-bit memory slots on x86. */
13868 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13873 return OP_X86_ADD_MEMBASE_REG;
13875 return OP_X86_SUB_MEMBASE_REG;
13877 return OP_X86_AND_MEMBASE_REG;
13879 return OP_X86_OR_MEMBASE_REG;
13881 return OP_X86_XOR_MEMBASE_REG;
13884 return OP_X86_ADD_MEMBASE_IMM;
13887 return OP_X86_SUB_MEMBASE_IMM;
13890 return OP_X86_AND_MEMBASE_IMM;
13893 return OP_X86_OR_MEMBASE_IMM;
13896 return OP_X86_XOR_MEMBASE_IMM;
13902 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64-bit slots. */
13903 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13908 return OP_X86_ADD_MEMBASE_REG;
13910 return OP_X86_SUB_MEMBASE_REG;
13912 return OP_X86_AND_MEMBASE_REG;
13914 return OP_X86_OR_MEMBASE_REG;
13916 return OP_X86_XOR_MEMBASE_REG;
13918 return OP_X86_ADD_MEMBASE_IMM;
13920 return OP_X86_SUB_MEMBASE_IMM;
13922 return OP_X86_AND_MEMBASE_IMM;
13924 return OP_X86_OR_MEMBASE_IMM;
13926 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (OP_L*) variants map to the AMD64-specific opcodes. */
13928 return OP_AMD64_ADD_MEMBASE_REG;
13930 return OP_AMD64_SUB_MEMBASE_REG;
13932 return OP_AMD64_AND_MEMBASE_REG;
13934 return OP_AMD64_OR_MEMBASE_REG;
13936 return OP_AMD64_XOR_MEMBASE_REG;
13939 return OP_AMD64_ADD_MEMBASE_IMM;
13942 return OP_AMD64_SUB_MEMBASE_IMM;
13945 return OP_AMD64_AND_MEMBASE_IMM;
13948 return OP_AMD64_OR_MEMBASE_IMM;
13951 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   When a condition-result opcode's byte-sized destination is spilled,
 * return an x86/amd64 SETcc-to-memory opcode so the flag can be written
 * straight to the stack slot.  Only OP_STOREI1_MEMBASE_REG (byte store)
 * qualifies, since SETcc writes a single byte.
 * NOTE(review): the 'case OP_*:' labels selecting SETEQ vs SETNE are
 * elided from this excerpt; code kept byte-identical.
 */
13961 op_to_op_store_membase (int store_opcode, int opcode)
13963 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13966 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13967 return OP_X86_SETEQ_MEMBASE;
13969 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13970 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source vreg was just filled from memory with
 * LOAD_OPCODE, return an x86/amd64 opcode that takes the memory slot as its
 * first operand directly, avoiding the separate load.  On amd64,
 * cfg->backend->ilp32 (x32 ABI) decides whether OP_LOAD_MEMBASE is a 32-bit
 * or 64-bit slot and picks ICOMPARE vs COMPARE accordingly.
 * NOTE(review): several 'case OP_*:' labels and the fall-through returns
 * are elided from this excerpt; code kept byte-identical.
 */
13978 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13981 /* FIXME: This has sign extension issues */
13983 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13984 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Remaining x86 fusions need a pointer-sized or 32-bit load. */
13987 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13992 return OP_X86_PUSH_MEMBASE;
13993 case OP_COMPARE_IMM:
13994 case OP_ICOMPARE_IMM:
13995 return OP_X86_COMPARE_MEMBASE_IMM;
13998 return OP_X86_COMPARE_MEMBASE_REG;
14002 #ifdef TARGET_AMD64
14003 /* FIXME: This has sign extension issues */
14005 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14006 return OP_X86_COMPARE_MEMBASE8_IMM;
14011 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14012 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original source. */
14014 /* FIXME: This only works for 32 bit immediates
14015 case OP_COMPARE_IMM:
14016 case OP_LCOMPARE_IMM:
14017 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14018 return OP_AMD64_COMPARE_MEMBASE_IMM;
14020 case OP_ICOMPARE_IMM:
14021 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14022 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pointer-sized compare: width depends on the ilp32 (x32) ABI. */
14026 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14027 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14028 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14029 return OP_AMD64_COMPARE_MEMBASE_REG;
14032 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14033 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase for the SECOND source operand:
 * return an x86/amd64 reg,mem form (OP_*_REG_MEMBASE) so the instruction
 * reads its second operand straight from the stack slot filled by
 * LOAD_OPCODE.  On amd64 the 32-bit forms reuse the OP_X86_* opcodes while
 * 64-bit operands use the OP_AMD64_* ones; ilp32 (x32) decides which side
 * pointer-sized loads fall on.
 * NOTE(review): 'case OP_*:' labels are elided from this excerpt; code
 * kept byte-identical.
 */
14042 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
14045 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14051 return OP_X86_COMPARE_REG_MEMBASE;
14053 return OP_X86_ADD_REG_MEMBASE;
14055 return OP_X86_SUB_REG_MEMBASE;
14057 return OP_X86_AND_REG_MEMBASE;
14059 return OP_X86_OR_REG_MEMBASE;
14061 return OP_X86_XOR_REG_MEMBASE;
14065 #ifdef TARGET_AMD64
14066 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14069 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14071 return OP_X86_ADD_REG_MEMBASE;
14073 return OP_X86_SUB_REG_MEMBASE;
14075 return OP_X86_AND_REG_MEMBASE;
14077 return OP_X86_OR_REG_MEMBASE;
14079 return OP_X86_XOR_REG_MEMBASE;
14081 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14085 return OP_AMD64_COMPARE_REG_MEMBASE;
14087 return OP_AMD64_ADD_REG_MEMBASE;
14089 return OP_AMD64_SUB_REG_MEMBASE;
14091 return OP_AMD64_AND_REG_MEMBASE;
14093 return OP_AMD64_OR_REG_MEMBASE;
14095 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuse the conversion (elided early cases)
 * for opcodes that this target emulates in software — long shifts on 32-bit
 * without native support, and mul/div/rem under MONO_ARCH_EMULATE_* — since
 * the emulation helpers take register arguments.  Otherwise delegate to
 * mono_op_to_op_imm.
 */
14104 mono_op_to_op_imm_noemul (int opcode)
14107 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14113 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14120 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14125 return mono_op_to_op_imm (opcode);
14130 * mono_handle_global_vregs:
14132 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass 1: walk every instruction of every bblock and record, per vreg, the
 * bblock it is used in.  vreg_to_bb encoding: 0 = not seen yet,
 * block_num + 1 = seen in exactly one bblock, -1 = seen in more than one
 * bblock (such vregs are made 'global' by allocating a variable for them).
 *   Pass 2: variables used in only one bblock are converted back into local
 * vregs (flagged MONO_INST_IS_DEAD), then the varinfo/vars tables are
 * compacted.
 * NOTE(review): many lines (braces, case labels, declarations, #endifs) are
 * elided from this excerpt; code kept byte-identical.
 */
14136 mono_handle_global_vregs (MonoCompile *cfg)
14138 gint32 *vreg_to_bb;
14139 MonoBasicBlock *bb;
/*
 * NOTE(review): element size below is sizeof (gint32*) — the POINTER type —
 * and the '+ 1' binds outside the multiplication; presumably
 * sizeof (gint32) * (cfg->next_vreg + 1) was intended.  Over-allocates on
 * 64-bit; under-allocates only if next_vreg == 0.  Confirm and fix upstream.
 */
14142 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14144 #ifdef MONO_ARCH_SIMD_INTRINSICS
14145 if (cfg->uses_simd_intrinsics)
14146 mono_simd_simplify_indirection (cfg);
14149 /* Find local vregs used in more than one bb */
14150 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14151 MonoInst *ins = bb->code;
14152 int block_num = bb->block_num;
14154 if (cfg->verbose_level > 2)
14155 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14158 for (; ins; ins = ins->next) {
14159 const char *spec = INS_INFO (ins->opcode);
14160 int regtype = 0, regindex;
14163 if (G_UNLIKELY (cfg->verbose_level > 2))
14164 mono_print_ins (ins);
/* IL opcodes must already have been lowered to machine-level IR. */
14166 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (0), sreg1 (1), sreg2 (2), sreg3 (3) per the INS_INFO spec. */
14168 for (regindex = 0; regindex < 4; regindex ++) {
14171 if (regindex == 0) {
14172 regtype = spec [MONO_INST_DEST];
14173 if (regtype == ' ')
14176 } else if (regindex == 1) {
14177 regtype = spec [MONO_INST_SRC1];
14178 if (regtype == ' ')
14181 } else if (regindex == 2) {
14182 regtype = spec [MONO_INST_SRC2];
14183 if (regtype == ' ')
14186 } else if (regindex == 3) {
14187 regtype = spec [MONO_INST_SRC3];
14188 if (regtype == ' ')
14193 #if SIZEOF_REGISTER == 4
14194 /* In the LLVM case, the long opcodes are not decomposed */
14195 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14197 * Since some instructions reference the original long vreg,
14198 * and some reference the two component vregs, it is quite hard
14199 * to determine when it needs to be global. So be conservative.
14201 if (!get_vreg_to_inst (cfg, vreg)) {
14202 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14204 if (cfg->verbose_level > 2)
14205 printf ("LONG VREG R%d made global.\n", vreg);
14209 * Make the component vregs volatile since the optimizations can
14210 * get confused otherwise.
14212 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14213 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14217 g_assert (vreg != -1);
14219 prev_bb = vreg_to_bb [vreg];
14220 if (prev_bb == 0) {
14221 /* 0 is a valid block num */
14222 vreg_to_bb [vreg] = block_num + 1;
14223 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
14224 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14227 if (!get_vreg_to_inst (cfg, vreg)) {
14228 if (G_UNLIKELY (cfg->verbose_level > 2))
14229 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the regtype ('i'/'l'/'f'/vtype). */
14233 if (vreg_is_ref (cfg, vreg))
14234 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14236 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14239 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14242 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14245 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14248 g_assert_not_reached ();
14252 /* Flag as having been used in more than one bb */
14253 vreg_to_bb [vreg] = -1;
14259 /* If a variable is used in only one bblock, convert it into a local vreg */
14260 for (i = 0; i < cfg->num_varinfo; i++) {
14261 MonoInst *var = cfg->varinfo [i];
14262 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14264 switch (var->type) {
14270 #if SIZEOF_REGISTER == 8
14273 #if !defined(TARGET_X86)
14274 /* Enabling this screws up the fp stack on x86 */
14277 if (mono_arch_is_soft_float ())
14281 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14285 /* Arguments are implicitly global */
14286 /* Putting R4 vars into registers doesn't work currently */
14287 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14288 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14290 * Make that the variable's liveness interval doesn't contain a call, since
14291 * that would cause the lvreg to be spilled, making the whole optimization
14294 /* This is too slow for JIT compilation */
14296 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14298 int def_index, call_index, ins_index;
14299 gboolean spilled = FALSE;
14304 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14305 const char *spec = INS_INFO (ins->opcode);
14307 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14308 def_index = ins_index;
/*
 * NOTE(review): both arms of this '||' test SRC1/sreg1 — the second arm
 * presumably meant SRC2/sreg2, so uses via the second source register are
 * missed.  (Elided context suggests this scan may be compiled out; confirm
 * against the full source before changing.)
 */
14310 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14311 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14312 if (call_index > def_index) {
14318 if (MONO_IS_CALL (ins))
14319 call_index = ins_index;
14329 if (G_UNLIKELY (cfg->verbose_level > 2))
14330 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; it lives on only as a local vreg. */
14331 var->flags |= MONO_INST_IS_DEAD;
14332 cfg->vreg_to_inst [var->dreg] = NULL;
14339 * Compress the varinfo and vars tables so the liveness computation is faster and
14340 * takes up less space.
14343 for (i = 0; i < cfg->num_varinfo; ++i) {
14344 MonoInst *var = cfg->varinfo [i];
14345 if (pos < i && cfg->locals_start == i)
14346 cfg->locals_start = pos;
14347 if (!(var->flags & MONO_INST_IS_DEAD)) {
14349 cfg->varinfo [pos] = cfg->varinfo [i];
14350 cfg->varinfo [pos]->inst_c0 = pos;
14351 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14352 cfg->vars [pos].idx = pos;
14353 #if SIZEOF_REGISTER == 4
14354 if (cfg->varinfo [pos]->type == STACK_I8) {
14355 /* Modify the two component vars too */
14358 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14359 var1->inst_c0 = pos;
14360 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14361 var1->inst_c0 = pos;
14368 cfg->num_varinfo = pos;
14369 if (cfg->locals_start > cfg->num_varinfo)
14370 cfg->locals_start = cfg->num_varinfo;
14374 * mono_allocate_gsharedvt_vars:
14376 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14377 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * mono_allocate_gsharedvt_vars:
 *
 *   Allocate variables with gsharedvt (variable-size generic-shared) types
 * to entries of the MonoGSharedVtMethodRuntimeInfo.entries array, and fill
 * cfg->gsharedvt_vreg_to_idx with the vreg -> index mapping
 * (idx + 1 for locals, -1 for arguments, 0 = not gsharedvt).
 */
14380 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14384 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14386 for (i = 0; i < cfg->num_varinfo; ++i) {
14387 MonoInst *ins = cfg->varinfo [i];
14390 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get an offset slot in the runtime-info entries array... */
14391 if (i >= cfg->locals_start) {
14393 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* Store idx + 1 so 0 can mean "no entry" in the mempool-zeroed array. */
14394 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14395 ins->opcode = OP_GSHAREDVT_LOCAL;
14396 ins->inst_imm = idx;
/* ...arguments are marked with -1 and addressed via their reg offset. */
14399 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14400 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14407 * mono_spill_global_vars:
14409 * Generate spill code for variables which are not allocated to registers,
14410 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14411 * code is generated which could be optimized by the local optimization passes.
14414 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14416 MonoBasicBlock *bb;
14418 int orig_next_vreg;
14419 guint32 *vreg_to_lvreg;
14421 guint32 i, lvregs_len;
14422 gboolean dest_has_lvreg = FALSE;
14423 MonoStackType stacktypes [128];
14424 MonoInst **live_range_start, **live_range_end;
14425 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14427 *need_local_opts = FALSE;
14429 memset (spec2, 0, sizeof (spec2));
14431 /* FIXME: Move this function to mini.c */
14432 stacktypes ['i'] = STACK_PTR;
14433 stacktypes ['l'] = STACK_I8;
14434 stacktypes ['f'] = STACK_R8;
14435 #ifdef MONO_ARCH_SIMD_INTRINSICS
14436 stacktypes ['x'] = STACK_VTYPE;
14439 #if SIZEOF_REGISTER == 4
14440 /* Create MonoInsts for longs */
14441 for (i = 0; i < cfg->num_varinfo; i++) {
14442 MonoInst *ins = cfg->varinfo [i];
14444 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14445 switch (ins->type) {
14450 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14453 g_assert (ins->opcode == OP_REGOFFSET);
14455 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14457 tree->opcode = OP_REGOFFSET;
14458 tree->inst_basereg = ins->inst_basereg;
14459 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14461 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14463 tree->opcode = OP_REGOFFSET;
14464 tree->inst_basereg = ins->inst_basereg;
14465 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14475 if (cfg->compute_gc_maps) {
14476 /* registers need liveness info even for !non refs */
14477 for (i = 0; i < cfg->num_varinfo; i++) {
14478 MonoInst *ins = cfg->varinfo [i];
14480 if (ins->opcode == OP_REGVAR)
14481 ins->flags |= MONO_INST_GC_TRACK;
14485 /* FIXME: widening and truncation */
14488 * As an optimization, when a variable allocated to the stack is first loaded into
14489 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14490 * the variable again.
14492 orig_next_vreg = cfg->next_vreg;
14493 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14494 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14498 * These arrays contain the first and last instructions accessing a given
14500 * Since we emit bblocks in the same order we process them here, and we
14501 * don't split live ranges, these will precisely describe the live range of
14502 * the variable, i.e. the instruction range where a valid value can be found
14503 * in the variables location.
14504 * The live range is computed using the liveness info computed by the liveness pass.
14505 * We can't use vmv->range, since that is an abstract live range, and we need
14506 * one which is instruction precise.
14507 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14509 /* FIXME: Only do this if debugging info is requested */
14510 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14511 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14512 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14513 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14515 /* Add spill loads/stores */
14516 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14519 if (cfg->verbose_level > 2)
14520 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14522 /* Clear vreg_to_lvreg array */
14523 for (i = 0; i < lvregs_len; i++)
14524 vreg_to_lvreg [lvregs [i]] = 0;
14528 MONO_BB_FOR_EACH_INS (bb, ins) {
14529 const char *spec = INS_INFO (ins->opcode);
14530 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14531 gboolean store, no_lvreg;
14532 int sregs [MONO_MAX_SRC_REGS];
14534 if (G_UNLIKELY (cfg->verbose_level > 2))
14535 mono_print_ins (ins);
14537 if (ins->opcode == OP_NOP)
14541 * We handle LDADDR here as well, since it can only be decomposed
14542 * when variable addresses are known.
14544 if (ins->opcode == OP_LDADDR) {
14545 MonoInst *var = (MonoInst *)ins->inst_p0;
14547 if (var->opcode == OP_VTARG_ADDR) {
14548 /* Happens on SPARC/S390 where vtypes are passed by reference */
14549 MonoInst *vtaddr = var->inst_left;
14550 if (vtaddr->opcode == OP_REGVAR) {
14551 ins->opcode = OP_MOVE;
14552 ins->sreg1 = vtaddr->dreg;
14554 else if (var->inst_left->opcode == OP_REGOFFSET) {
14555 ins->opcode = OP_LOAD_MEMBASE;
14556 ins->inst_basereg = vtaddr->inst_basereg;
14557 ins->inst_offset = vtaddr->inst_offset;
14560 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14561 /* gsharedvt arg passed by ref */
14562 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14564 ins->opcode = OP_LOAD_MEMBASE;
14565 ins->inst_basereg = var->inst_basereg;
14566 ins->inst_offset = var->inst_offset;
14567 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14568 MonoInst *load, *load2, *load3;
14569 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14570 int reg1, reg2, reg3;
14571 MonoInst *info_var = cfg->gsharedvt_info_var;
14572 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14576 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14579 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14581 g_assert (info_var);
14582 g_assert (locals_var);
14584 /* Mark the instruction used to compute the locals var as used */
14585 cfg->gsharedvt_locals_var_ins = NULL;
14587 /* Load the offset */
14588 if (info_var->opcode == OP_REGOFFSET) {
14589 reg1 = alloc_ireg (cfg);
14590 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14591 } else if (info_var->opcode == OP_REGVAR) {
14593 reg1 = info_var->dreg;
14595 g_assert_not_reached ();
14597 reg2 = alloc_ireg (cfg);
14598 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14599 /* Load the locals area address */
14600 reg3 = alloc_ireg (cfg);
14601 if (locals_var->opcode == OP_REGOFFSET) {
14602 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14603 } else if (locals_var->opcode == OP_REGVAR) {
14604 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14606 g_assert_not_reached ();
14608 /* Compute the address */
14609 ins->opcode = OP_PADD;
14613 mono_bblock_insert_before_ins (bb, ins, load3);
14614 mono_bblock_insert_before_ins (bb, load3, load2);
14616 mono_bblock_insert_before_ins (bb, load2, load);
14618 g_assert (var->opcode == OP_REGOFFSET);
14620 ins->opcode = OP_ADD_IMM;
14621 ins->sreg1 = var->inst_basereg;
14622 ins->inst_imm = var->inst_offset;
14625 *need_local_opts = TRUE;
14626 spec = INS_INFO (ins->opcode);
14629 if (ins->opcode < MONO_CEE_LAST) {
14630 mono_print_ins (ins);
14631 g_assert_not_reached ();
14635 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14639 if (MONO_IS_STORE_MEMBASE (ins)) {
14640 tmp_reg = ins->dreg;
14641 ins->dreg = ins->sreg2;
14642 ins->sreg2 = tmp_reg;
14645 spec2 [MONO_INST_DEST] = ' ';
14646 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14647 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14648 spec2 [MONO_INST_SRC3] = ' ';
14650 } else if (MONO_IS_STORE_MEMINDEX (ins))
14651 g_assert_not_reached ();
14656 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14657 printf ("\t %.3s %d", spec, ins->dreg);
14658 num_sregs = mono_inst_get_src_registers (ins, sregs);
14659 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14660 printf (" %d", sregs [srcindex]);
14667 regtype = spec [MONO_INST_DEST];
14668 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14671 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14672 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14673 MonoInst *store_ins;
14675 MonoInst *def_ins = ins;
14676 int dreg = ins->dreg; /* The original vreg */
14678 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14680 if (var->opcode == OP_REGVAR) {
14681 ins->dreg = var->dreg;
14682 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14684 * Instead of emitting a load+store, use a _membase opcode.
14686 g_assert (var->opcode == OP_REGOFFSET);
14687 if (ins->opcode == OP_MOVE) {
14691 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14692 ins->inst_basereg = var->inst_basereg;
14693 ins->inst_offset = var->inst_offset;
14696 spec = INS_INFO (ins->opcode);
14700 g_assert (var->opcode == OP_REGOFFSET);
14702 prev_dreg = ins->dreg;
14704 /* Invalidate any previous lvreg for this vreg */
14705 vreg_to_lvreg [ins->dreg] = 0;
14709 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14711 store_opcode = OP_STOREI8_MEMBASE_REG;
14714 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14716 #if SIZEOF_REGISTER != 8
14717 if (regtype == 'l') {
14718 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14719 mono_bblock_insert_after_ins (bb, ins, store_ins);
14720 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14721 mono_bblock_insert_after_ins (bb, ins, store_ins);
14722 def_ins = store_ins;
14727 g_assert (store_opcode != OP_STOREV_MEMBASE);
14729 /* Try to fuse the store into the instruction itself */
14730 /* FIXME: Add more instructions */
14731 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14732 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14733 ins->inst_imm = ins->inst_c0;
14734 ins->inst_destbasereg = var->inst_basereg;
14735 ins->inst_offset = var->inst_offset;
14736 spec = INS_INFO (ins->opcode);
14737 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14738 ins->opcode = store_opcode;
14739 ins->inst_destbasereg = var->inst_basereg;
14740 ins->inst_offset = var->inst_offset;
14744 tmp_reg = ins->dreg;
14745 ins->dreg = ins->sreg2;
14746 ins->sreg2 = tmp_reg;
14749 spec2 [MONO_INST_DEST] = ' ';
14750 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14751 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14752 spec2 [MONO_INST_SRC3] = ' ';
14754 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14755 // FIXME: The backends expect the base reg to be in inst_basereg
14756 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14758 ins->inst_basereg = var->inst_basereg;
14759 ins->inst_offset = var->inst_offset;
14760 spec = INS_INFO (ins->opcode);
14762 /* printf ("INS: "); mono_print_ins (ins); */
14763 /* Create a store instruction */
14764 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14766 /* Insert it after the instruction */
14767 mono_bblock_insert_after_ins (bb, ins, store_ins);
14769 def_ins = store_ins;
14772 * We can't assign ins->dreg to var->dreg here, since the
14773 * sregs could use it. So set a flag, and do it after
14776 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14777 dest_has_lvreg = TRUE;
14782 if (def_ins && !live_range_start [dreg]) {
14783 live_range_start [dreg] = def_ins;
14784 live_range_start_bb [dreg] = bb;
14787 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14790 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14791 tmp->inst_c1 = dreg;
14792 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14799 num_sregs = mono_inst_get_src_registers (ins, sregs);
14800 for (srcindex = 0; srcindex < 3; ++srcindex) {
14801 regtype = spec [MONO_INST_SRC1 + srcindex];
14802 sreg = sregs [srcindex];
14804 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14805 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14806 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14807 MonoInst *use_ins = ins;
14808 MonoInst *load_ins;
14809 guint32 load_opcode;
14811 if (var->opcode == OP_REGVAR) {
14812 sregs [srcindex] = var->dreg;
14813 //mono_inst_set_src_registers (ins, sregs);
14814 live_range_end [sreg] = use_ins;
14815 live_range_end_bb [sreg] = bb;
14817 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14820 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14821 /* var->dreg is a hreg */
14822 tmp->inst_c1 = sreg;
14823 mono_bblock_insert_after_ins (bb, ins, tmp);
14829 g_assert (var->opcode == OP_REGOFFSET);
14831 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14833 g_assert (load_opcode != OP_LOADV_MEMBASE);
14835 if (vreg_to_lvreg [sreg]) {
14836 g_assert (vreg_to_lvreg [sreg] != -1);
14838 /* The variable is already loaded to an lvreg */
14839 if (G_UNLIKELY (cfg->verbose_level > 2))
14840 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14841 sregs [srcindex] = vreg_to_lvreg [sreg];
14842 //mono_inst_set_src_registers (ins, sregs);
14846 /* Try to fuse the load into the instruction */
14847 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14848 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14849 sregs [0] = var->inst_basereg;
14850 //mono_inst_set_src_registers (ins, sregs);
14851 ins->inst_offset = var->inst_offset;
14852 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14853 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14854 sregs [1] = var->inst_basereg;
14855 //mono_inst_set_src_registers (ins, sregs);
14856 ins->inst_offset = var->inst_offset;
14858 if (MONO_IS_REAL_MOVE (ins)) {
14859 ins->opcode = OP_NOP;
14862 //printf ("%d ", srcindex); mono_print_ins (ins);
14864 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14866 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14867 if (var->dreg == prev_dreg) {
14869 * sreg refers to the value loaded by the load
14870 * emitted below, but we need to use ins->dreg
14871 * since it refers to the store emitted earlier.
14875 g_assert (sreg != -1);
14876 vreg_to_lvreg [var->dreg] = sreg;
14877 g_assert (lvregs_len < 1024);
14878 lvregs [lvregs_len ++] = var->dreg;
14882 sregs [srcindex] = sreg;
14883 //mono_inst_set_src_registers (ins, sregs);
14885 #if SIZEOF_REGISTER != 8
14886 if (regtype == 'l') {
14887 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14888 mono_bblock_insert_before_ins (bb, ins, load_ins);
14889 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14890 mono_bblock_insert_before_ins (bb, ins, load_ins);
14891 use_ins = load_ins;
14896 #if SIZEOF_REGISTER == 4
14897 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14899 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14900 mono_bblock_insert_before_ins (bb, ins, load_ins);
14901 use_ins = load_ins;
14905 if (var->dreg < orig_next_vreg) {
14906 live_range_end [var->dreg] = use_ins;
14907 live_range_end_bb [var->dreg] = bb;
14910 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14913 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14914 tmp->inst_c1 = var->dreg;
14915 mono_bblock_insert_after_ins (bb, ins, tmp);
14919 mono_inst_set_src_registers (ins, sregs);
14921 if (dest_has_lvreg) {
14922 g_assert (ins->dreg != -1);
14923 vreg_to_lvreg [prev_dreg] = ins->dreg;
14924 g_assert (lvregs_len < 1024);
14925 lvregs [lvregs_len ++] = prev_dreg;
14926 dest_has_lvreg = FALSE;
14930 tmp_reg = ins->dreg;
14931 ins->dreg = ins->sreg2;
14932 ins->sreg2 = tmp_reg;
14935 if (MONO_IS_CALL (ins)) {
14936 /* Clear vreg_to_lvreg array */
14937 for (i = 0; i < lvregs_len; i++)
14938 vreg_to_lvreg [lvregs [i]] = 0;
14940 } else if (ins->opcode == OP_NOP) {
14942 MONO_INST_NULLIFY_SREGS (ins);
14945 if (cfg->verbose_level > 2)
14946 mono_print_ins_index (1, ins);
14949 /* Extend the live range based on the liveness info */
14950 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14951 for (i = 0; i < cfg->num_varinfo; i ++) {
14952 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14954 if (vreg_is_volatile (cfg, vi->vreg))
14955 /* The liveness info is incomplete */
14958 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14959 /* Live from at least the first ins of this bb */
14960 live_range_start [vi->vreg] = bb->code;
14961 live_range_start_bb [vi->vreg] = bb;
14964 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14965 /* Live at least until the last ins of this bb */
14966 live_range_end [vi->vreg] = bb->last_ins;
14967 live_range_end_bb [vi->vreg] = bb;
14974 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14975 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14977 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14978 for (i = 0; i < cfg->num_varinfo; ++i) {
14979 int vreg = MONO_VARINFO (cfg, i)->vreg;
14982 if (live_range_start [vreg]) {
14983 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14985 ins->inst_c1 = vreg;
14986 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14988 if (live_range_end [vreg]) {
14989 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14991 ins->inst_c1 = vreg;
14992 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14993 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14995 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15000 if (cfg->gsharedvt_locals_var_ins) {
15001 /* Nullify if unused */
15002 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15003 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15006 g_free (live_range_start);
15007 g_free (live_range_end);
15008 g_free (live_range_start_bb);
15009 g_free (live_range_end_bb);
15014 * - use 'iadd' instead of 'int_add'
15015 * - handling ovf opcodes: decompose in method_to_ir.
15016 * - unify iregs/fregs
15017 * -> partly done, the missing parts are:
15018 * - a more complete unification would involve unifying the hregs as well, so
15019 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15020 * would no longer map to the machine hregs, so the code generators would need to
15021 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15022 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15023 * fp/non-fp branches speeds it up by about 15%.
15024 * - use sext/zext opcodes instead of shifts
15026 * - get rid of TEMPLOADs if possible and use vregs instead
15027 * - clean up usage of OP_P/OP_ opcodes
15028 * - cleanup usage of DUMMY_USE
15029 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15031 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15032 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15033 * - make sure handle_stack_args () is called before the branch is emitted
15034 * - when the new IR is done, get rid of all unused stuff
15035 * - COMPARE/BEQ as separate instructions or unify them ?
15036 * - keeping them separate allows specialized compare instructions like
15037 * compare_imm, compare_membase
15038 * - most back ends unify fp compare+branch, fp compare+ceq
15039 * - integrate mono_save_args into inline_method
 15040 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15041 * - handle long shift opts on 32 bit platforms somehow: they require
15042 * 3 sregs (2 for arg1 and 1 for arg2)
15043 * - make byref a 'normal' type.
15044 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15045 * variable if needed.
15046 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15047 * like inline_method.
15048 * - remove inlining restrictions
15049 * - fix LNEG and enable cfold of INEG
15050 * - generalize x86 optimizations like ldelema as a peephole optimization
15051 * - add store_mem_imm for amd64
15052 * - optimize the loading of the interruption flag in the managed->native wrappers
15053 * - avoid special handling of OP_NOP in passes
15054 * - move code inserting instructions into one function/macro.
15055 * - try a coalescing phase after liveness analysis
15056 * - add float -> vreg conversion + local optimizations on !x86
15057 * - figure out how to handle decomposed branches during optimizations, ie.
15058 * compare+branch, op_jump_table+op_br etc.
15059 * - promote RuntimeXHandles to vregs
15060 * - vtype cleanups:
15061 * - add a NEW_VARLOADA_VREG macro
15062 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15063 * accessing vtype fields.
15064 * - get rid of I8CONST on 64 bit platforms
15065 * - dealing with the increase in code size due to branches created during opcode
15067 * - use extended basic blocks
15068 * - all parts of the JIT
15069 * - handle_global_vregs () && local regalloc
15070 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15071 * - sources of increase in code size:
15074 * - isinst and castclass
15075 * - lvregs not allocated to global registers even if used multiple times
15076 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15078 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15079 * - add all micro optimizations from the old JIT
15080 * - put tree optimizations into the deadce pass
15081 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15082 * specific function.
15083 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15084 * fcompare + branchCC.
15085 * - create a helper function for allocating a stack slot, taking into account
15086 * MONO_CFG_HAS_SPILLUP.
15088 * - merge the ia64 switch changes.
15089 * - optimize mono_regstate2_alloc_int/float.
15090 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15091 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15092 * parts of the tree could be separated by other instructions, killing the tree
15093 * arguments, or stores killing loads etc. Also, should we fold loads into other
15094 * instructions if the result of the load is used multiple times ?
15095 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15096 * - LAST MERGE: 108395.
15097 * - when returning vtypes in registers, generate IR and append it to the end of the
15098 * last bb instead of doing it in the epilog.
15099 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15107 - When to decompose opcodes:
15108 - earlier: this makes some optimizations hard to implement, since the low level IR
 15109 no longer contains the necessary information. But it is easier to do.
15110 - later: harder to implement, enables more optimizations.
15111 - Branches inside bblocks:
15112 - created when decomposing complex opcodes.
15113 - branches to another bblock: harmless, but not tracked by the branch
15114 optimizations, so need to branch to a label at the start of the bblock.
15115 - branches to inside the same bblock: very problematic, trips up the local
 15116 reg allocator. Can be fixed by splitting the current bblock, but that is a
15117 complex operation, since some local vregs can become global vregs etc.
15118 - Local/global vregs:
15119 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15120 local register allocator.
15121 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15122 structure, created by mono_create_var (). Assigned to hregs or the stack by
15123 the global register allocator.
15124 - When to do optimizations like alu->alu_imm:
15125 - earlier -> saves work later on since the IR will be smaller/simpler
15126 - later -> can work on more instructions
15127 - Handling of valuetypes:
15128 - When a vtype is pushed on the stack, a new temporary is created, an
15129 instruction computing its address (LDADDR) is emitted and pushed on
15130 the stack. Need to optimize cases when the vtype is used immediately as in
15131 argument passing, stloc etc.
15132 - Instead of the to_end stuff in the old JIT, simply call the function handling
15133 the values on the stack before emitting the last instruction of the bb.
15136 #endif /* DISABLE_JIT */