2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
/* Inlining heuristics: relative cost of a branch and the max IL length we inline. */
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current callee and bail out of method_to_ir via exception_exit. */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
/* Bail out if a previous step already recorded a compilation exception on cfg. */
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
/* Record a MethodAccessException for CMETHOD being inaccessible from METHOD, then bail. */
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
/* Record a FieldAccessException for FIELD being inaccessible from METHOD, then bail. */
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
/* Give up on generic sharing for OPCODE (only when compiling a gshared method). */
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
/* Same as above, but for gsharedvt (valuetype-generic-sharing) compilation. */
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
/* Record an OutOfMemory condition on cfg->error and bail. */
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
/* Mark the method as not AOT-compilable; logs the call site when verbose. */
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
/* Generic type-load failure: trip the debugger hook, record TypeLoadException, bail. */
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
/* Type-load failure for a specific KLASS (stored in cfg->exception_ptr). */
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
/* Convert a failed cfg->error into a compilation exception; jumps to mono_error_exit. */
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Core inliner entry point; returns nonzero on success (see definition below). */
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
/* Lazily-built icall signatures, filled in by mono_create_helper_signatures (). */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
/* Cached class lookups for RuntimeHelpers and DebuggableAttribute. */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
166 * Instruction metadata
/* Expand mini-ops.h into per-opcode dest/src register-kind descriptors. */
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
/* Second expansion: count the source registers used by each opcode. */
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness info to "not yet used". */
206 #define MONO_INIT_VARINFO(vi,id) do { \
207 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the inline vreg allocators in mini.h: allocate a fresh
 * virtual register of the given kind (int/long/float/pointer) on CFG. */
213 mono_alloc_ireg (MonoCompile *cfg)
215 return alloc_ireg (cfg);
219 mono_alloc_lreg (MonoCompile *cfg)
221 return alloc_lreg (cfg);
225 mono_alloc_freg (MonoCompile *cfg)
227 return alloc_freg (cfg);
231 mono_alloc_preg (MonoCompile *cfg)
233 return alloc_preg (cfg);
/* Allocate a destination vreg matching the given eval-stack type. */
237 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
239 return alloc_dreg (cfg, stack_type);
243 * mono_alloc_ireg_ref:
245 * Allocate an IREG, and mark it as holding a GC ref.
248 mono_alloc_ireg_ref (MonoCompile *cfg)
250 return alloc_ireg_ref (cfg);
254 * mono_alloc_ireg_mp:
256 * Allocate an IREG, and mark it as holding a managed pointer.
259 mono_alloc_ireg_mp (MonoCompile *cfg)
261 return alloc_ireg_mp (cfg);
265 * mono_alloc_ireg_copy:
267 * Allocate an IREG with the same GC type as VREG.
/* Propagates the ref/managed-pointer classification of VREG to the new register. */
270 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
272 if (vreg_is_ref (cfg, vreg))
273 return alloc_ireg_ref (cfg);
274 else if (vreg_is_mp (cfg, vreg))
275 return alloc_ireg_mp (cfg);
277 return alloc_ireg (cfg);
/* mono_type_to_regmove:
 *   Map a MonoType to the register-move opcode (OP_MOVE / OP_FMOVE / OP_RMOVE / ...)
 * used when copying a value of that type between vregs. Enums and generic
 * instantiations are resolved to their underlying type first; errors out on
 * unknown type codes. */
281 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
286 type = mini_get_underlying_type (type);
288 switch (type->type) {
301 case MONO_TYPE_FNPTR:
303 case MONO_TYPE_CLASS:
304 case MONO_TYPE_STRING:
305 case MONO_TYPE_OBJECT:
306 case MONO_TYPE_SZARRAY:
307 case MONO_TYPE_ARRAY:
311 #if SIZEOF_REGISTER == 8
/* R4 values live in float regs (OP_RMOVE) only when the r4fp mode is enabled. */
317 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
320 case MONO_TYPE_VALUETYPE:
321 if (type->data.klass->enumtype) {
322 type = mono_class_enum_basetype (type->data.klass);
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 case MONO_TYPE_TYPEDBYREF:
330 case MONO_TYPE_GENERICINST:
331 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing; recurse on the shared repr. */
335 g_assert (cfg->gshared);
336 if (mini_type_var_is_vt (type))
339 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
341 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* mono_print_bb:
 *   Debug dump of one basic block: its in/out edges (by block number and
 * depth-first number) followed by every instruction in the block. */
347 mono_print_bb (MonoBasicBlock *bb, const char *msg)
352 printf ("\n%s %d: [IN: ", msg, bb->block_num);
353 for (i = 0; i < bb->in_count; ++i)
354 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
356 for (i = 0; i < bb->out_count; ++i)
357 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
359 for (tree = bb->code; tree; tree = tree->next)
360 mono_print_ins_index (-1, tree);
/* mono_create_helper_signatures:
 *   One-time initialization of the cached icall signatures declared above.
 * The strings are "<ret> <args...>" in mono_create_icall_signature format. */
364 mono_create_helper_signatures (void)
366 helper_sig_domain_get = mono_create_icall_signature ("ptr");
367 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
368 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/* break_on_unverified:
 *   Debugger convenience hook: when --break-on-unverified is set, trap here
 * so a native debugger stops at the point unverifiable IL was found.
 * MONO_NEVER_INLINE keeps the frame visible in backtraces. */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/* method_access_failure:
 *   Record a System.MethodAccessException on cfg->error naming both the
 * inaccessible callee and the calling method; frees the temporary name strings. */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
384 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/* field_access_failure:
 *   Record a System.FieldAccessException on cfg->error naming the inaccessible
 * field and the calling method; mirrors method_access_failure above. */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
395 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/* inline_failure:
 *   Mark the compilation as failed-to-inline (MONO_EXCEPTION_INLINE_FAILED);
 * the caller (INLINE_FAILURE macro) then jumps to exception_exit. */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* gsharedvt_failure:
 *   Same role as gshared_failure but for gsharedvt compilation; additionally
 * stores a formatted diagnostic in cfg->exception_message (heap-allocated,
 * owned by cfg). */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
/* UNVERIFIED: handle unverifiable IL. Under gsharedvt, fall back to compiling
 * the concrete instantiation instead of failing outright; otherwise trip the
 * debugger hook and (in the elided tail) record a verification failure. */
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
/* GET_BBLOCK: fetch (or lazily create and register) the basic block starting
 * at IL offset IP, using the cil_offset_to_bb cache. Validates IP bounds. */
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86 LEA computing sr1 + (sr2 << shift) + imm into
 * a fresh managed-pointer vreg and append it to the current block. */
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
/* add_widen_op:
 *   If the two stack operands of INS disagree in width/kind, emit a widening
 * conversion and rewrite the corresponding sreg/operand pointer so both sides
 * match: r4 is promoted to r8 when mixed with r8, and (on 64-bit) an i4
 * operand paired with a native pointer is sign-extended. */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
/* ADD_BINOP: pop two eval-stack values, build the type-specialized binary op
 * (via type_from_op + widening), allocate its dreg, append it, and push the
 * (possibly decomposed) result back on the stack. */
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for single-operand opcodes. */
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins); \
/* ADD_BINCOND: lower a two-operand conditional branch: emit a compare + branch
 * pair, wire up the true edge to the branch target and the false edge to
 * NEXT_BLOCK (or the fall-through block at ip), flushing the eval stack if
 * values are live across the edge. */
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, cfg->cbb, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, cfg->cbb, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (cfg->cbb, cmp); \
559 MONO_ADD_INS (cfg->cbb, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
/* Adds the edge to both adjacency lists (from->out_bb and to->in_bb),
 * skipping duplicates; the lists are reallocated from cfg->mempool with
 * one extra slot each time (quadratic in edge count, but edge counts are
 * small in practice). */
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
597 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Skip if the in-edge already exists. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause->flags.
 * Handler regions (filter/finally/fault/catch) are checked before try
 * regions so the innermost classification wins. */
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: plain try-region membership. */
666 for (i = 0; i < header->num_clauses; ++i) {
667 clause = &header->clauses [i];
669 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
670 return ((i + 1) << 8) | clause->flags;
/* mono_find_final_block:
 *   Collect (as a GList of MonoExceptionClause*) every clause of kind TYPE
 * whose protected range contains IP but not TARGET — i.e. the handlers that
 * a branch from IP to TARGET would leave and which therefore must run. */
677 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
679 MonoMethodHeader *header = cfg->header;
680 MonoExceptionClause *clause;
684 for (i = 0; i < header->num_clauses; ++i) {
685 clause = &header->clauses [i];
686 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
687 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
688 if (clause->flags == type)
689 res = g_list_append (res, clause);
/* mono_create_spvar_for_region:
 *   Return (creating on first use) the stack-pointer spill variable for the
 * given EH region, cached in cfg->spvars keyed by region token. */
696 mono_create_spvar_for_region (MonoCompile *cfg, int region)
700 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
704 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
705 /* prevent it from being register allocated */
706 var->flags |= MONO_INST_VOLATILE;
708 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler offset. */
712 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
714 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return (creating on first use) the exception-object variable for a handler
 * offset, cached in cfg->exvars; typed as object, kept off the register
 * allocator. */
718 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
722 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
726 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
727 /* prevent it from being register allocated */
728 var->flags |= MONO_INST_VOLATILE;
730 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
736 * Returns the type used in the eval stack when @type is loaded.
737 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and inst->klass for a
 * value of TYPE; byref becomes STACK_MP, enums and generic instantiations
 * are resolved to their underlying type, and gsharedvt type variables are
 * treated as value types. */
740 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
744 type = mini_get_underlying_type (type);
745 inst->klass = klass = mono_class_from_mono_type (type);
747 inst->type = STACK_MP;
752 switch (type->type) {
754 inst->type = STACK_INV;
762 inst->type = STACK_I4;
767 case MONO_TYPE_FNPTR:
768 inst->type = STACK_PTR;
770 case MONO_TYPE_CLASS:
771 case MONO_TYPE_STRING:
772 case MONO_TYPE_OBJECT:
773 case MONO_TYPE_SZARRAY:
774 case MONO_TYPE_ARRAY:
775 inst->type = STACK_OBJ;
779 inst->type = STACK_I8;
782 inst->type = cfg->r4_stack_type;
785 inst->type = STACK_R8;
787 case MONO_TYPE_VALUETYPE:
788 if (type->data.klass->enumtype) {
789 type = mono_class_enum_basetype (type->data.klass);
793 inst->type = STACK_VTYPE;
796 case MONO_TYPE_TYPEDBYREF:
797 inst->klass = mono_defaults.typed_reference_class;
798 inst->type = STACK_VTYPE;
800 case MONO_TYPE_GENERICINST:
801 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing. */
805 g_assert (cfg->gshared);
806 if (mini_is_gsharedvt_type (type)) {
807 g_assert (cfg->gsharedvt);
808 inst->type = STACK_VTYPE;
810 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
814 g_error ("unknown type 0x%02x in eval stack type", type->type);
819 * The following tables are used to quickly validate the IL code in type_from_op ().
/* bin_num_table[a][b]: result stack type of an arithmetic binop on stack
 * types a and b; STACK_INV marks invalid IL combinations. Row/column order
 * follows the STACK_* enum (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4). */
822 bin_num_table [STACK_MAX] [STACK_MAX] = {
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
828 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* neg_table: result type of unary negation per operand stack type. */
836 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
839 /* reduce the size of this table */
/* bin_int_table: like bin_num_table but for integer-only binops (and/or/xor...). */
841 bin_int_table [STACK_MAX] [STACK_MAX] = {
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* bin_comp_table: nonzero when comparing the two stack types is legal IL;
 * values >1 encode the partially-verifiable cases handled by callers. */
853 bin_comp_table [STACK_MAX] [STACK_MAX] = {
854 /* Inv i L p F & O vt r4 */
856 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
857 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
858 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
859 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
860 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
861 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
862 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
863 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
866 /* reduce the size of this table */
/* shift_table: result type of shl/shr given (value, shift-amount) types. */
868 shift_table [STACK_MAX] [STACK_MAX] = {
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
880 * Tables to map from the non-specific opcode to the matching
881 * type-specific opcode.
883 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CEE_* opcode, indexed
 * by the (already computed) result stack type. */
885 binops_op_map [STACK_MAX] = {
886 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
889 /* handles from CEE_NEG to CEE_CONV_U8 */
891 unops_op_map [STACK_MAX] = {
892 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
895 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
897 ovfops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
901 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
903 ovf2ops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
907 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
909 ovf3ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
913 /* handles from CEE_BEQ to CEE_BLT_UN */
915 beqops_op_map [STACK_MAX] = {
916 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
919 /* handles from CEE_CEQ to CEE_CLT_UN */
921 ceqops_op_map [STACK_MAX] = {
922 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
926 * Sets ins->type (the type on the eval stack) according to the
927 * type of the opcode and the arguments to it.
928 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
930 * FIXME: this function sets ins->type unconditionally in some cases, but
931 * it should set it to invalid for some types (a conv.x on an object)
/* The workhorse of IL typing: for each generic opcode it (a) computes the
 * result stack type from the tables above and (b) specializes ins->opcode
 * to the I/L/P/F/R variant by adding the matching *_op_map delta. */
934 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
936 switch (ins->opcode) {
943 /* FIXME: check unverifiable args for STACK_MP */
944 ins->type = bin_num_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
952 ins->type = bin_int_table [src1->type] [src2->type];
953 ins->opcode += binops_op_map [ins->type];
958 ins->type = shift_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the compare width from the first operand. */
964 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
965 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
966 ins->opcode = OP_LCOMPARE;
967 else if (src1->type == STACK_R4)
968 ins->opcode = OP_RCOMPARE;
969 else if (src1->type == STACK_R8)
970 ins->opcode = OP_FCOMPARE;
972 ins->opcode = OP_ICOMPARE;
974 case OP_ICOMPARE_IMM:
975 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
976 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
977 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / compare-and-set opcodes. */
989 ins->opcode += beqops_op_map [src1->type];
992 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
993 ins->opcode += ceqops_op_map [src1->type];
999 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1000 ins->opcode += ceqops_op_map [src1->type];
/* Unary arithmetic. */
1004 ins->type = neg_table [src1->type];
1005 ins->opcode += unops_op_map [ins->type];
1008 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1009 ins->type = src1->type;
1011 ins->type = STACK_INV;
1012 ins->opcode += unops_op_map [ins->type];
/* Narrowing/widening conversions; result type is fixed by the CEE opcode. */
1018 ins->type = STACK_I4;
1019 ins->opcode += unops_op_map [src1->type];
1022 ins->type = STACK_R8;
1023 switch (src1->type) {
1026 ins->opcode = OP_ICONV_TO_R_UN;
1029 ins->opcode = OP_LCONV_TO_R_UN;
1033 case CEE_CONV_OVF_I1:
1034 case CEE_CONV_OVF_U1:
1035 case CEE_CONV_OVF_I2:
1036 case CEE_CONV_OVF_U2:
1037 case CEE_CONV_OVF_I4:
1038 case CEE_CONV_OVF_U4:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf3ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I_UN:
1043 case CEE_CONV_OVF_U_UN:
1044 ins->type = STACK_PTR;
1045 ins->opcode += ovf2ops_op_map [src1->type];
1047 case CEE_CONV_OVF_I1_UN:
1048 case CEE_CONV_OVF_I2_UN:
1049 case CEE_CONV_OVF_I4_UN:
1050 case CEE_CONV_OVF_U1_UN:
1051 case CEE_CONV_OVF_U2_UN:
1052 case CEE_CONV_OVF_U4_UN:
1053 ins->type = STACK_I4;
1054 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-size unsigned conversion; depends on pointer width. */
1057 ins->type = STACK_PTR;
1058 switch (src1->type) {
1060 ins->opcode = OP_ICONV_TO_U;
1064 #if SIZEOF_VOID_P == 8
1065 ins->opcode = OP_LCONV_TO_U;
1067 ins->opcode = OP_MOVE;
1071 ins->opcode = OP_LCONV_TO_U;
1074 ins->opcode = OP_FCONV_TO_U;
1080 ins->type = STACK_I8;
1081 ins->opcode += unops_op_map [src1->type];
1083 case CEE_CONV_OVF_I8:
1084 case CEE_CONV_OVF_U8:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf3ops_op_map [src1->type];
1088 case CEE_CONV_OVF_U8_UN:
1089 case CEE_CONV_OVF_I8_UN:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf2ops_op_map [src1->type];
1094 ins->type = cfg->r4_stack_type;
1095 ins->opcode += unops_op_map [src1->type];
1098 ins->type = STACK_R8;
1099 ins->opcode += unops_op_map [src1->type];
1102 ins->type = STACK_R8;
1106 ins->type = STACK_I4;
1107 ins->opcode += ovfops_op_map [src1->type];
1110 case CEE_CONV_OVF_I:
1111 case CEE_CONV_OVF_U:
1112 ins->type = STACK_PTR;
1113 ins->opcode += ovfops_op_map [src1->type];
/* Checked arithmetic: floats are invalid operands per ECMA-335. */
1116 case CEE_ADD_OVF_UN:
1118 case CEE_MUL_OVF_UN:
1120 case CEE_SUB_OVF_UN:
1121 ins->type = bin_num_table [src1->type] [src2->type];
1122 ins->opcode += ovfops_op_map [src1->type];
1123 if (ins->type == STACK_R8)
1124 ins->type = STACK_INV;
/* Memory loads: result type fixed by the load width. */
1126 case OP_LOAD_MEMBASE:
1127 ins->type = STACK_PTR;
1129 case OP_LOADI1_MEMBASE:
1130 case OP_LOADU1_MEMBASE:
1131 case OP_LOADI2_MEMBASE:
1132 case OP_LOADU2_MEMBASE:
1133 case OP_LOADI4_MEMBASE:
1134 case OP_LOADU4_MEMBASE:
1135 ins->type = STACK_PTR;
1137 case OP_LOADI8_MEMBASE:
1138 ins->type = STACK_I8;
1140 case OP_LOADR4_MEMBASE:
1141 ins->type = cfg->r4_stack_type;
1143 case OP_LOADR8_MEMBASE:
1144 ins->type = STACK_R8;
1147 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack are typed as object for GC purposes. */
1151 if (ins->type == STACK_MP)
1152 ins->klass = mono_defaults.object_class;
/* ldind_type: eval-stack type produced by each CEE_LDIND_* variant, in opcode order. */
1157 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: (stack type) x (signature type) compatibility; currently unused
 * by the live check below (see the commented-out lookup at the bottom). */
1163 param_table [STACK_MAX] [STACK_MAX] = {
/* check_values_to_signature:
 *   Cheap sanity check that the eval-stack values in ARGS are plausible for
 * SIG (byref-ness and basic kind per parameter); returns 0 on mismatch. */
1168 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1173 switch (args->type) {
1183 for (i = 0; i < sig->param_count; ++i) {
1184 switch (args [i].type) {
1188 if (!sig->params [i]->byref)
1192 if (sig->params [i]->byref)
1194 switch (sig->params [i]->type) {
1195 case MONO_TYPE_CLASS:
1196 case MONO_TYPE_STRING:
1197 case MONO_TYPE_OBJECT:
1198 case MONO_TYPE_SZARRAY:
1199 case MONO_TYPE_ARRAY:
1206 if (sig->params [i]->byref)
1208 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1217 /*if (!param_table [args [i].type] [sig->params [i]->type])
1225 * When we need a pointer to the current domain many times in a method, we
1226 * call mono_domain_get() once and we store the result in a local variable.
1227 * This function returns the variable that represents the MonoDomain*.
1229 inline static MonoInst *
1230 mono_get_domainvar (MonoCompile *cfg)
1232 if (!cfg->domainvar)
1233 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1234 return cfg->domainvar;
1238 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT-address variable; NULL when not AOT-compiling or the
 * backend does not need one. */
1242 mono_get_got_var (MonoCompile *cfg)
1244 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1246 if (!cfg->got_var) {
1247 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used by generic-shared code; only
 * valid under generic sharing, and forced onto the stack so the unwinder can
 * find it. */
1253 mono_get_vtable_var (MonoCompile *cfg)
1255 g_assert (cfg->gshared);
1257 if (!cfg->rgctx_var) {
1258 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 /* force the var to be stack allocated */
1260 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1263 return cfg->rgctx_var;
/* type_from_stack_type:
 *   Inverse of type_to_eval_stack_type: map an instruction's eval-stack type
 * back to a representative MonoType (using ins->klass for MP/VTYPE). */
1267 type_from_stack_type (MonoInst *ins) {
1268 switch (ins->type) {
1269 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1270 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1271 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1272 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1273 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1275 return &ins->klass->this_arg;
1276 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1277 case STACK_VTYPE: return &ins->klass->byval_arg;
1279 g_error ("stack type %d to monotype not handled\n", ins->type);
1284 static G_GNUC_UNUSED int
1285 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1287 t = mono_type_get_underlying_type (t);
1299 case MONO_TYPE_FNPTR:
1301 case MONO_TYPE_CLASS:
1302 case MONO_TYPE_STRING:
1303 case MONO_TYPE_OBJECT:
1304 case MONO_TYPE_SZARRAY:
1305 case MONO_TYPE_ARRAY:
1311 return cfg->r4_stack_type;
1314 case MONO_TYPE_VALUETYPE:
1315 case MONO_TYPE_TYPEDBYREF:
1317 case MONO_TYPE_GENERICINST:
1318 if (mono_type_generic_inst_is_valuetype (t))
1324 g_assert_not_reached ();
1331 array_access_to_klass (int opcode)
1335 return mono_defaults.byte_class;
1337 return mono_defaults.uint16_class;
1340 return mono_defaults.int_class;
1343 return mono_defaults.sbyte_class;
1346 return mono_defaults.int16_class;
1349 return mono_defaults.int32_class;
1351 return mono_defaults.uint32_class;
1354 return mono_defaults.int64_class;
1357 return mono_defaults.single_class;
1360 return mono_defaults.double_class;
1361 case CEE_LDELEM_REF:
1362 case CEE_STELEM_REF:
1363 return mono_defaults.object_class;
1365 g_assert_not_reached ();
1371 * We try to share variables when possible
1374 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1379 /* inlining can result in deeper stacks */
1380 if (slot >= cfg->header->max_stack)
1381 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 pos = ins->type - 1 + slot * STACK_MAX;
1385 switch (ins->type) {
1392 if ((vnum = cfg->intvars [pos]))
1393 return cfg->varinfo [vnum];
1394 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1395 cfg->intvars [pos] = res->inst_c0;
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1404 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1407 * Don't use this if a generic_context is set, since that means AOT can't
1408 * look up the method using just the image+token.
1409 * table == 0 means this is a reference made from a wrapper.
1411 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1412 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1413 jump_info_token->image = image;
1414 jump_info_token->token = token;
1415 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1420 * This function is called to handle items that are left on the evaluation stack
1421 * at basic block boundaries. What happens is that we save the values to local variables
1422 * and we reload them later when first entering the target basic block (with the
1423 * handle_loaded_temps () function).
1424 * A single joint point will use the same variables (stored in the array bb->out_stack or
1425 * bb->in_stack, if the basic block is before or after the joint point).
1427 * This function needs to be called _before_ emitting the last instruction of
1428 * the bb (i.e. before emitting a branch).
1429 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * NOTE(review): this block is extraction-damaged (interior lines are missing
 * and each line carries a stray leading number); tokens are preserved as-is,
 * only comments were added. See the contract comment above this function.
 */
1432 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1435 MonoBasicBlock *bb = cfg->cbb;
1436 MonoBasicBlock *outb;
1437 MonoInst *inst, **locals;
1442 if (cfg->verbose_level > 3)
1443 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick (or allocate) the out_stack variables. */
1444 if (!bb->out_scount) {
1445 bb->out_scount = count;
1446 //printf ("bblock %d has out:", bb->block_num);
1448 for (i = 0; i < bb->out_count; ++i) {
1449 outb = bb->out_bb [i];
1450 /* exception handlers are linked, but they should not be considered for stack args */
1451 if (outb->flags & BB_EXCEPTION_HANDLER)
1453 //printf (" %d", outb->block_num);
/* A successor already has an in_stack: share its variables. */
1454 if (outb->in_stack) {
1456 bb->out_stack = outb->in_stack;
1462 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1463 for (i = 0; i < count; ++i) {
1465 * try to reuse temps already allocated for this purpouse, if they occupy the same
1466 * stack slot and if they are of the same type.
1467 * This won't cause conflicts since if 'local' is used to
1468 * store one of the values in the in_stack of a bblock, then
1469 * the same variable will be used for the same outgoing stack
1471 * This doesn't work when inlining methods, since the bblocks
1472 * in the inlined methods do not inherit their in_stack from
1473 * the bblock they are inlined to. See bug #58863 for an
1476 if (cfg->inlined_method)
1477 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1479 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bblock's out_stack to successors without an in_stack yet. */
1484 for (i = 0; i < bb->out_count; ++i) {
1485 outb = bb->out_bb [i];
1486 /* exception handlers are linked, but they should not be considered for stack args */
1487 if (outb->flags & BB_EXCEPTION_HANDLER)
1489 if (outb->in_scount) {
/* Stack depth mismatch at a join point -> unverifiable IL. */
1490 if (outb->in_scount != bb->out_scount) {
1491 cfg->unverifiable = TRUE;
1494 continue; /* check they are the same locals */
1496 outb->in_scount = count;
1497 outb->in_stack = bb->out_stack;
1500 locals = bb->out_stack;
/* Spill the evaluation stack into the shared locals. */
1502 for (i = 0; i < count; ++i) {
1503 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1504 inst->cil_code = sp [i]->cil_code;
1505 sp [i] = locals [i];
1506 if (cfg->verbose_level > 3)
1507 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1511 * It is possible that the out bblocks already have in_stack assigned, and
1512 * the in_stacks differ. In this case, we will store to all the different
1519 /* Find a bblock which has a different in_stack */
1521 while (bindex < bb->out_count) {
1522 outb = bb->out_bb [bindex];
1523 /* exception handlers are linked, but they should not be considered for stack args */
1524 if (outb->flags & BB_EXCEPTION_HANDLER) {
1528 if (outb->in_stack != locals) {
1529 for (i = 0; i < count; ++i) {
1530 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1531 inst->cil_code = sp [i]->cil_code;
1532 sp [i] = locals [i];
1533 if (cfg->verbose_level > 3)
1534 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1536 locals = outb->in_stack;
1546 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1550 if (cfg->compile_aot) {
1551 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1557 ji.type = patch_type;
1558 ji.data.target = data;
1559 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1560 mono_error_assert_ok (&error);
1562 EMIT_NEW_PCONST (cfg, ins, target);
1568 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1570 int ibitmap_reg = alloc_preg (cfg);
1571 #ifdef COMPRESSED_INTERFACE_BITMAP
1573 MonoInst *res, *ins;
1574 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1575 MONO_ADD_INS (cfg->cbb, ins);
1577 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1578 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1581 int ibitmap_byte_reg = alloc_preg (cfg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1585 if (cfg->compile_aot) {
1586 int iid_reg = alloc_preg (cfg);
1587 int shifted_iid_reg = alloc_preg (cfg);
1588 int ibitmap_byte_address_reg = alloc_preg (cfg);
1589 int masked_iid_reg = alloc_preg (cfg);
1590 int iid_one_bit_reg = alloc_preg (cfg);
1591 int iid_bit_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1597 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1601 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1608 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1609 * stored in "klass_reg" implements the interface "klass".
1612 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1614 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1618 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1619 * stored in "vtable_reg" implements the interface "klass".
1622 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1624 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1628 * Emit code which checks whenever the interface id of @klass is smaller than
1629 * than the value given by max_iid_reg.
1632 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 if (cfg->compile_aot) {
1636 int iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1648 /* Same as above, but obtains max_iid from a vtable */
1650 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1651 MonoBasicBlock *false_target)
1653 int max_iid_reg = alloc_preg (cfg);
1655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1656 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1659 /* Same as above, but obtains max_iid from a klass */
1661 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1662 MonoBasicBlock *false_target)
1664 int max_iid_reg = alloc_preg (cfg);
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1667 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1671 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1673 int idepth_reg = alloc_preg (cfg);
1674 int stypes_reg = alloc_preg (cfg);
1675 int stype = alloc_preg (cfg);
1677 mono_class_setup_supertypes (klass);
1679 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1687 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1688 } else if (cfg->compile_aot) {
1689 int const_reg = alloc_preg (cfg);
1690 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1699 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1701 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1705 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_reg = alloc_preg (cfg);
1709 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1719 * Variant of the above that takes a register to the class, not the vtable.
1722 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1724 int intf_bit_reg = alloc_preg (cfg);
1726 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1727 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1732 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1736 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1744 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1748 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1750 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1754 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1756 if (cfg->compile_aot) {
1757 int const_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1767 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1770 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1773 int rank_reg = alloc_preg (cfg);
1774 int eclass_reg = alloc_preg (cfg);
1776 g_assert (!klass_inst);
1777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1780 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1782 if (klass->cast_class == mono_defaults.object_class) {
1783 int parent_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1785 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1786 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1787 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1788 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1789 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1790 } else if (klass->cast_class == mono_defaults.enum_class) {
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1793 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1795 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1796 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1799 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1800 /* Check that the object is a vector too */
1801 int bounds_reg = alloc_preg (cfg);
1802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1804 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1807 int idepth_reg = alloc_preg (cfg);
1808 int stypes_reg = alloc_preg (cfg);
1809 int stype = alloc_preg (cfg);
1811 mono_class_setup_supertypes (klass);
1813 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1816 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1820 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1825 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1827 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1831 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1835 g_assert (val == 0);
1840 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1849 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1851 #if SIZEOF_REGISTER == 8
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1859 val_reg = alloc_preg (cfg);
1861 if (SIZEOF_REGISTER == 8)
1862 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1864 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1867 /* This could be optimized further if neccesary */
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1876 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1907 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1914 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1915 g_assert (size < 10000);
1918 /* This could be optimized further if neccesary */
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1929 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1967 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1971 if (cfg->compile_aot) {
1972 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1973 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1975 ins->sreg2 = c->dreg;
1976 MONO_ADD_INS (cfg->cbb, ins);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1980 ins->inst_offset = mini_get_tls_offset (tls_key);
1981 MONO_ADD_INS (cfg->cbb, ins);
1988 * Emit IR to push the current LMF onto the LMF stack.
/*
 * NOTE(review): this block is extraction-damaged (interior lines and the
 * platform #ifdef structure are partially missing); tokens are preserved
 * as-is, only comments were added. The alternative bodies below presumably
 * correspond to different TLS/backend configurations — TODO confirm against
 * the full source.
 */
1991 emit_push_lmf (MonoCompile *cfg)
1994 * Emit IR to push the LMF:
1995 * lmf_addr = <lmf_addr from tls>
1996 * lmf->lmf_addr = lmf_addr
1997 * lmf->prev_lmf = *lmf_addr
2000 int lmf_reg, prev_lmf_reg;
2001 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS; link and store it directly. */
2006 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2007 /* Load current lmf */
2008 lmf_ins = mono_get_lmf_intrinsic (cfg);
2010 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2012 lmf_reg = ins->dreg;
2013 /* Save previous_lmf */
2014 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2016 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2019 * Store lmf_addr in a variable, so it can be allocated to a global register.
2021 if (!cfg->lmf_addr_var)
2022 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Variant: compute lmf_addr from the jit_tls structure. */
2025 ins = mono_get_jit_tls_intrinsic (cfg);
2027 int jit_tls_dreg = ins->dreg;
2029 MONO_ADD_INS (cfg->cbb, ins);
2030 lmf_reg = alloc_preg (cfg);
2031 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2033 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2036 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2038 MONO_ADD_INS (cfg->cbb, lmf_ins);
2041 MonoInst *args [16], *jit_tls_ins, *ins;
2043 /* Inline mono_get_lmf_addr () */
2044 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2046 /* Load mono_jit_tls_id */
2047 if (cfg->compile_aot)
2048 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2050 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2051 /* call pthread_getspecific () */
2052 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2053 /* lmf_addr = &jit_tls->lmf */
2054 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: plain icall to obtain lmf_addr. */
2057 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2061 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2063 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2064 lmf_reg = ins->dreg;
2066 prev_lmf_reg = alloc_preg (cfg);
2067 /* Save previous_lmf */
2068 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0)
2069 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf */
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2078 * Emit IR to pop the current LMF from the LMF stack.
/*
 * NOTE(review): extraction-damaged block (interior lines missing); tokens are
 * preserved as-is, only comments were added.
 */
2081 emit_pop_lmf (MonoCompile *cfg)
2083 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2089 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2090 lmf_reg = ins->dreg;
/* Fast path: LMF lives in TLS; restore the previous LMF into the TLS slot. */
2092 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2093 /* Load previous_lmf */
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2100 * Emit IR to pop the LMF:
2101 * *(lmf->lmf_addr) = lmf->prev_lmf
2103 /* This could be called before emit_push_lmf () */
2104 if (!cfg->lmf_addr_var)
2105 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2106 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2108 prev_lmf_reg = alloc_preg (cfg);
2109 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2110 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2115 emit_instrumentation_call (MonoCompile *cfg, void *func)
2117 MonoInst *iargs [1];
2120 * Avoid instrumenting inlined methods since it can
2121 * distort profiling results.
2123 if (cfg->method != cfg->current_method)
2126 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2127 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2128 mono_emit_jit_icall (cfg, func, iargs);
2133 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2136 type = mini_get_underlying_type (type);
2137 switch (type->type) {
2138 case MONO_TYPE_VOID:
2139 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2146 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2150 case MONO_TYPE_FNPTR:
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_CLASS:
2153 case MONO_TYPE_STRING:
2154 case MONO_TYPE_OBJECT:
2155 case MONO_TYPE_SZARRAY:
2156 case MONO_TYPE_ARRAY:
2157 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2163 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2165 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2168 case MONO_TYPE_VALUETYPE:
2169 if (type->data.klass->enumtype) {
2170 type = mono_class_enum_basetype (type->data.klass);
2173 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2174 case MONO_TYPE_TYPEDBYREF:
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_GENERICINST:
2177 type = &type->data.generic_class->container_class->byval_arg;
2180 case MONO_TYPE_MVAR:
2182 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2190 * target_type_is_incompatible:
2191 * @cfg: MonoCompile context
2193 * Check that the item @arg on the evaluation stack can be stored
2194 * in the target type (can be a local, or field, etc).
2195 * The cfg arg can be used to check if we need verification or just
2198 * Returns: non-0 value if arg can't be stored on a target.
/*
 * NOTE(review): extraction-damaged block (interior lines missing, including
 * several case labels and return statements); tokens are preserved as-is,
 * only comments were added. See the contract comment above this function.
 */
2201 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2203 MonoType *simple_type;
/* byref targets: only managed/unmanaged pointers may be stored. */
2206 if (target->byref) {
2207 /* FIXME: check that the pointed to types match */
2208 if (arg->type == STACK_MP) {
2209 MonoClass *base_class = mono_class_from_mono_type (target);
2210 /* This is needed to handle gshared types + ldaddr */
2211 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2212 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2214 if (arg->type == STACK_PTR)
/* Non-byref: dispatch on the underlying target type. */
2219 simple_type = mini_get_underlying_type (target);
2220 switch (simple_type->type) {
2221 case MONO_TYPE_VOID:
2229 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2233 /* STACK_MP is needed when setting pinned locals */
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2239 case MONO_TYPE_FNPTR:
2241 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2242 * in native int. (#688008).
2244 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_CLASS:
2248 case MONO_TYPE_STRING:
2249 case MONO_TYPE_OBJECT:
2250 case MONO_TYPE_SZARRAY:
2251 case MONO_TYPE_ARRAY:
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
2258 if (arg->type != STACK_I8)
2262 if (arg->type != cfg->r4_stack_type)
2266 if (arg->type != STACK_R8)
2269 case MONO_TYPE_VALUETYPE:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_TYPEDBYREF:
2277 if (arg->type != STACK_VTYPE)
2279 klass = mono_class_from_mono_type (simple_type);
2280 if (klass != arg->klass)
2283 case MONO_TYPE_GENERICINST:
2284 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2285 MonoClass *target_class;
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 target_class = mono_class_from_mono_type (target);
2290 /* The second cases is needed when doing partial sharing */
2291 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2295 if (arg->type != STACK_OBJ)
2297 /* FIXME: check type compatibility */
/* Generic type variables: only valid in gshared compilation. */
2301 case MONO_TYPE_MVAR:
2302 g_assert (cfg->gshared);
2303 if (mini_type_var_is_vt (simple_type)) {
2304 if (arg->type != STACK_VTYPE)
2307 if (arg->type != STACK_OBJ)
2312 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2318 * Prepare arguments for passing to a function call.
2319 * Return a non-zero value if the arguments can't be passed to the given
2321 * The type checks are not yet complete and some conversions may need
2322 * casts on 32 or 64 bit architectures.
2324 * FIXME: implement this using target_type_is_incompatible ()
/*
 * NOTE(review): extraction-damaged block (interior lines missing, including
 * several case labels and return statements); tokens are preserved as-is,
 * only comments were added. See the contract comment above this function.
 */
2327 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2329 MonoType *simple_type;
/* 'this' argument must be an object, managed pointer or native pointer. */
2333 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2337 for (i = 0; i < sig->param_count; ++i) {
2338 if (sig->params [i]->byref) {
2339 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2343 simple_type = mini_get_underlying_type (sig->params [i]);
2345 switch (simple_type->type) {
2346 case MONO_TYPE_VOID:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2361 case MONO_TYPE_FNPTR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_STRING:
2367 case MONO_TYPE_OBJECT:
2368 case MONO_TYPE_SZARRAY:
2369 case MONO_TYPE_ARRAY:
2370 if (args [i]->type != STACK_OBJ)
2375 if (args [i]->type != STACK_I8)
2379 if (args [i]->type != cfg->r4_stack_type)
2383 if (args [i]->type != STACK_R8)
2386 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their base type. */
2387 if (simple_type->data.klass->enumtype) {
2388 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2391 if (args [i]->type != STACK_VTYPE)
2394 case MONO_TYPE_TYPEDBYREF:
2395 if (args [i]->type != STACK_VTYPE)
2398 case MONO_TYPE_GENERICINST:
2399 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2402 case MONO_TYPE_MVAR:
2404 if (args [i]->type != STACK_VTYPE)
2408 g_error ("unknown type 0x%02x in check_call_signature",
2416 callvirt_to_call (int opcode)
2419 case OP_CALL_MEMBASE:
2421 case OP_VOIDCALL_MEMBASE:
2423 case OP_FCALL_MEMBASE:
2425 case OP_RCALL_MEMBASE:
2427 case OP_VCALL_MEMBASE:
2429 case OP_LCALL_MEMBASE:
2432 g_assert_not_reached ();
2439 callvirt_to_call_reg (int opcode)
2442 case OP_CALL_MEMBASE:
2444 case OP_VOIDCALL_MEMBASE:
2445 return OP_VOIDCALL_REG;
2446 case OP_FCALL_MEMBASE:
2447 return OP_FCALL_REG;
2448 case OP_RCALL_MEMBASE:
2449 return OP_RCALL_REG;
2450 case OP_VCALL_MEMBASE:
2451 return OP_VCALL_REG;
2452 case OP_LCALL_MEMBASE:
2453 return OP_LCALL_REG;
2455 g_assert_not_reached ();
2461 /* Either METHOD or IMT_ARG needs to be set */
/*
 * NOTE(review): extraction-damaged block (interior lines and presumably the
 * architecture #ifdef structure are missing); tokens are preserved as-is,
 * only comments were added. Either METHOD or IMT_ARG must be set (see the
 * comment above).
 */
2463 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM path: record the IMT register on the call instruction. */
2467 if (COMPILE_LLVM (cfg)) {
2469 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2472 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2473 method_reg = ins->dreg;
2477 call->imt_arg_reg = method_reg;
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: pass the method (or imt_arg) in the arch IMT register. */
2484 method_reg = alloc_preg (cfg);
2485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2487 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2488 method_reg = ins->dreg;
2491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill in
 * its target. The allocation lives as long as the mempool; callers never
 * free it individually. (IP/type field assignments are outside this excerpt.)
 */
2494 static MonoJumpInfo *
2495 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2497 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2501 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Thin cfg-aware wrapper: report which parts of the generic context
 * (class/method type args) KLASS depends on, by delegating to
 * mono_class_check_context_used. (Any gsharing guard is outside this excerpt.)
 */
2507 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2510 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Thin cfg-aware wrapper around mono_method_check_context_used; see
 * mini_class_check_context_used for the class analogue.
 */
2516 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2519 return mono_method_check_context_used (method);
2525 * check_method_sharing:
2527 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2530 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2532 gboolean pass_vtable = FALSE;
2533 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes have no usable 'this' to
 * recover the vtable from, so the vtable may need to be passed explicitly. */
2535 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2536 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2537 gboolean sharable = FALSE;
2539 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) take an mrgctx instead of a vtable. */
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2560 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Out-parameters are optional; only written when the caller asked for them. */
2565 if (out_pass_vtable)
2566 *out_pass_vtable = pass_vtable;
2567 if (out_pass_mrgctx)
2568 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS and let the backend lower the
 * argument passing. CALLI selects an indirect call, VIRTUAL_ a vtable
 * call, TAIL an OP_TAILCALL, RGCTX marks an rgctx-carrying call and
 * UNBOX_TRAMPOLINE requests an unbox trampoline for valuetype 'this'.
 * NOTE(review): interior lines are missing from this excerpt (e.g. the
 * 'if (tail)' guard that distinguishes the two mini_type_is_vtype branches,
 * and the ENABLE_LLVM #ifdef around the double mono_arch_emit_call).
 */
2571 inline static MonoCallInst *
2572 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2573 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2577 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Profiler leave event must fire before a tail call replaces this frame. */
2585 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2587 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2589 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2592 call->signature = sig;
2593 call->rgctx_reg = rgctx;
2594 sig_ret = mini_get_underlying_type (sig->ret);
2596 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2599 if (mini_type_is_vtype (sig_ret)) {
2600 call->vret_var = cfg->vret_addr;
2601 //g_assert_not_reached ();
2603 } else if (mini_type_is_vtype (sig_ret)) {
/* Valuetype return: allocate a local to receive it by address. */
2604 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2607 temp->backend.is_pinvoke = sig->pinvoke;
2610 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2611 * address of return value to increase optimization opportunities.
2612 * Before vtype decomposition, the dreg of the call ins itself represents the
2613 * fact the call modifies the return value. After decomposition, the call will
2614 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2615 * will be transformed into an LDADDR.
2617 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2618 loada->dreg = alloc_preg (cfg);
2619 loada->inst_p0 = temp;
2620 /* We reference the call too since call->dreg could change during optimization */
2621 loada->inst_p1 = call;
2622 MONO_ADD_INS (cfg->cbb, loada);
2624 call->inst.dreg = temp->dreg;
2626 call->vret_var = loada;
2627 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2628 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2630 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2631 if (COMPILE_SOFT_FLOAT (cfg)) {
2633 * If the call has a float argument, we would need to do an r8->r4 conversion using
2634 * an icall, but that cannot be done during the call sequence since it would clobber
2635 * the call registers + the stack. So we do it before emitting the call.
2637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2639 MonoInst *in = call->args [i];
2641 if (i >= sig->hasthis)
2642 t = sig->params [i - sig->hasthis];
2644 t = &mono_defaults.int_class->byval_arg;
2645 t = mono_type_get_underlying_type (t);
2647 if (!t->byref && t->type == MONO_TYPE_R4) {
2648 MonoInst *iargs [1];
2652 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2654 /* The result will be in an int vreg */
2655 call->args [i] = conv;
2661 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the out-args: LLVM backend or the native architecture backend. */
2664 if (COMPILE_LLVM (cfg))
2665 mono_llvm_emit_call (cfg, call);
2667 mono_arch_emit_call (cfg, call);
2669 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area needed by any call in this method. */
2672 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2673 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the runtime-generic-context argument (already in RGCTX_REG) to
 * CALL: register it as an out-arg in MONO_ARCH_RGCTX_REG and flag both the
 * cfg and the call so later passes preserve the register across the call.
 */
2679 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2682 cfg->uses_rgctx_reg = TRUE;
2683 call->rgctx_reg = TRUE;
2685 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and/or an rgctx argument. For managed->native pinvoke
 * wrappers (when check_pinvoke_callconv is on) it also records the stack
 * pointer before the call and verifies it afterwards, throwing
 * ExecutionEngineException on a calling-convention mismatch.
 * NOTE(review): some guard lines ('if (rgctx_arg)', 'if (check_sp)') are
 * missing from this excerpt.
 */
2689 inline static MonoInst*
2690 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2695 gboolean check_sp = FALSE;
2697 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2698 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2700 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a dedicated vreg before arg lowering. */
2705 rgctx_reg = mono_alloc_preg (cfg);
2706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2710 if (!cfg->stack_inbalance_var)
2711 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the post-call sanity check. */
2713 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 ins->dreg = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2718 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2720 call->inst.sreg1 = addr->dreg;
2723 emit_imt_argument (cfg, call, NULL, imt_arg);
2725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2730 sp_reg = mono_alloc_preg (cfg);
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 MONO_ADD_INS (cfg->cbb, ins);
2736 /* Restore the stack so we don't crash when throwing the exception */
2737 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2738 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2739 MONO_ADD_INS (cfg->cbb, ins);
/* SP changed across the call => callee used a different calling convention. */
2741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2742 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2746 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2748 return (MonoInst*)call;
2752 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2755 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2757 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2760 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2761 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2763 #ifndef DISABLE_REMOTING
2764 gboolean might_be_remote = FALSE;
2766 gboolean virtual_ = this_ins != NULL;
2767 gboolean enable_for_aot = TRUE;
2770 MonoInst *call_target = NULL;
2772 gboolean need_unbox_trampoline;
2775 sig = mono_method_signature (method);
2777 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2778 g_assert_not_reached ();
2781 rgctx_reg = mono_alloc_preg (cfg);
2782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2785 if (method->string_ctor) {
2786 /* Create the real signature */
2787 /* FIXME: Cache these */
2788 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2789 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2794 context_used = mini_method_check_context_used (cfg, method);
2796 #ifndef DISABLE_REMOTING
2797 might_be_remote = this_ins && sig->hasthis &&
2798 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2799 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2801 if (might_be_remote && context_used) {
2804 g_assert (cfg->gshared);
2806 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2808 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2812 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2813 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this_ins;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this_ins->dreg;
2835 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2894 } else if (call_target) {
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2898 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2899 call->inst.sreg1 = call_target->dreg;
2900 call->inst.flags &= !MONO_INST_HAS_METHOD;
2902 vtable_reg = alloc_preg (cfg);
2903 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2904 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 guint32 imt_slot = mono_method_get_imt_slot (method);
2906 emit_imt_argument (cfg, call, call->method, imt_arg);
2907 slot_reg = vtable_reg;
2908 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2910 slot_reg = vtable_reg;
2911 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2912 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2914 g_assert (mono_method_signature (method)->generic_param_count);
2915 emit_imt_argument (cfg, call, call->method, imt_arg);
2919 call->inst.sreg1 = slot_reg;
2920 call->inst.inst_offset = offset;
2921 call->is_virtual = TRUE;
2925 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2928 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2930 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full for the common
 * case: the method's own signature, no tail call, no IMT or rgctx args.
 */
2934 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2936 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * Builds the call via mono_emit_call_args (no calli/virtual/tail/rgctx)
 * and appends it to the current basic block.
 * (The line assigning FUNC as the call target is outside this excerpt.)
 */
2940 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2947 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2950 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2952 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its C function
 * address FUNC: look up its MonoJitICallInfo and call through the icall's
 * wrapper with the registered signature.
 */
2956 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2958 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2962 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2966 * mono_emit_abs_call:
2968 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2970 inline static MonoInst*
2971 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2972 MonoMethodSignature *sig, MonoInst **args)
2974 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2978 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2981 if (cfg->abs_patches == NULL)
/* Lazily create the patch table; keyed and valued by the MonoJumpInfo itself. */
2982 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2983 g_hash_table_insert (cfg->abs_patches, ji, ji);
2984 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Tell later passes the 'fptr' is a patch descriptor, not a real address. */
2985 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG extended with one trailing native-int parameter,
 * used to pass an rgctx/extra argument on calli paths.
 * NOTE: allocated with g_malloc (see FIXME) — the copy is leaked/owned by
 * the caller, unlike mempool-based signature dups elsewhere in this file.
 */
2989 static MonoMethodSignature*
2990 sig_to_rgctx_sig (MonoMethodSignature *sig)
2992 // FIXME: memory allocation
2993 MonoMethodSignature *res;
2996 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2997 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2998 res->param_count = sig->param_count + 1;
2999 for (i = 0; i < sig->param_count; ++i)
3000 res->params [i] = sig->params [i];
/* The extra arg is typed as a native int ('this_arg' of the int class). */
3001 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3005 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *
 *   Build an argument array of 'this' (if any) + FSIG's params + one extra
 * value taken from ARG_REG, then emit an indirect call to CALL_TARGET using
 * the rgctx-extended signature produced by sig_to_rgctx_sig.
 * Uses a stack buffer for small arg counts, mempool allocation otherwise.
 */
3007 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3009 MonoMethodSignature *csig;
3010 MonoInst *args_buf [16];
3012 int i, pindex, tmp_reg;
3014 /* Make a call with an rgctx/extra arg */
3015 if (fsig->param_count + 2 < 16)
3018 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3021 args [pindex ++] = orig_args [0];
3022 for (i = 0; i < fsig->param_count; ++i)
3023 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument as a fresh register move. */
3024 tmp_reg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3026 csig = sig_to_rgctx_sig (fsig);
3027 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3030 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *
 *   llvm-only mode uses two-word function descriptors instead of raw code
 * pointers: word 0 is the callee address, word 1 an extra argument.
 * Load both and dispatch via emit_extra_arg_calli.
 */
3032 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3034 int addr_reg, arg_reg;
3035 MonoInst *call_target;
3037 g_assert (cfg->llvm_only);
3040 * addr points to a <addr, arg> pair, load both of them, and
3041 * make a call to addr, passing arg as an extra arg.
3043 addr_reg = alloc_preg (cfg);
3044 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3045 arg_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3048 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Decide whether JIT icalls may be called directly (without their wrapper).
 * Disabled under LLVM on this target (see comment), when emitting seq points
 * for the debugger, or when explicitly turned off on the cfg.
 * NOTE(review): the surrounding #ifdef and return statements are missing
 * from this excerpt.
 */
3052 direct_icalls_enabled (MonoCompile *cfg)
3054 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3056 if (cfg->compile_llvm)
3059 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. If the icall cannot raise
 * and direct icalls are enabled, inline its (lazily created, cached) managed
 * wrapper instead of calling through it; otherwise fall back to a normal
 * wrapper call.
 */
3065 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3068 * Call the jit icall without a wrapper if possible.
3069 * The wrapper is needed for the following reasons:
3070 * - to handle exceptions thrown using mono_raise_exceptions () from the
3071 * icall function. The EH code needs the lmf frame pushed by the
3072 * wrapper to be able to unwind back to managed code.
3073 * - to be able to do stack walks for asynchronously suspended
3074 * threads when debugging.
3076 if (info->no_raise && direct_icalls_enabled (cfg)) {
3080 if (!info->wrapper_method) {
/* Lazily create and publish the wrapper; barrier orders the publication. */
3081 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3082 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3084 mono_memory_barrier ();
3088 * Inline the wrapper method, which is basically a call to the C icall, and
3089 * an exception check.
3091 costs = inline_method (cfg, info->wrapper_method, NULL,
3092 args, NULL, cfg->real_offset, TRUE);
3093 g_assert (costs > 0);
3094 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   After a pinvoke (or LLVM) call returning a small integer, emit a sign/
 * zero extension of the result, since native code may leave the upper bits
 * of sub-register-sized return values uninitialized. Returns the widened
 * instruction (or, per missing lines, presumably INS unchanged otherwise).
 */
3103 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3105 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3106 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3110 * Native code might return non register sized integers
3111 * without initializing the upper bits.
3113 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3114 case OP_LOADI1_MEMBASE:
3115 widen_op = OP_ICONV_TO_I1;
3117 case OP_LOADU1_MEMBASE:
3118 widen_op = OP_ICONV_TO_U1;
3120 case OP_LOADI2_MEMBASE:
3121 widen_op = OP_ICONV_TO_I2;
3123 case OP_LOADU2_MEMBASE:
3124 widen_op = OP_ICONV_TO_U2;
3130 if (widen_op != -1) {
3131 int dreg = alloc_preg (cfg);
3134 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3135 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) corlib's String.memcpy helper used for
 * valuetype copies. Aborts if the running corlib predates the helper.
 */
3145 get_memcpy_method (void)
3147 static MonoMethod *memcpy_method = NULL;
3148 if (!memcpy_method) {
3149 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap (one bit per pointer-sized word, starting at
 * OFFSET) marking which words of KLASS contain GC references, so a
 * wb-aware copy can emit write barriers only for those words. Static
 * fields are skipped; nested valuetypes with references are recursed into.
 */
3157 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3159 MonoClassField *field;
3160 gpointer iter = NULL;
3162 while ((field = mono_class_get_fields (klass, &iter))) {
3165 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header; strip it. */
3167 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3168 if (mini_type_is_reference (mono_field_get_type (field))) {
3169 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3170 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3172 MonoClass *field_class = mono_class_from_mono_type (field->type);
3173 if (field_class->has_references)
3174 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Three
 * strategies, best first: a backend OP_CARD_TABLE_WBARRIER; an inline
 * card-table mark (shift, optional mask, store 1 into the card byte);
 * or a call to the generic GC write-barrier method. No-op when write
 * barriers are disabled on the cfg.
 */
3180 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3182 int card_table_shift_bits;
3183 gpointer card_table_mask;
3185 MonoInst *dummy_use;
3186 int nursery_shift_bits;
3187 size_t nursery_size;
3189 if (!cfg->gen_write_barriers)
3192 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3194 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3196 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3199 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3200 wbarrier->sreg1 = ptr->dreg;
3201 wbarrier->sreg2 = value->dreg;
3202 MONO_ADD_INS (cfg->cbb, wbarrier);
3203 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* Inline card marking: card index = ptr >> shift (optionally masked). */
3204 int offset_reg = alloc_preg (cfg);
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3209 if (card_table_mask)
3210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3212 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3213 * IMM's larger than 32bits.
3215 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3216 card_reg = ins->dreg;
3218 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3219 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3221 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3222 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive up to the barrier so the GC sees a consistent state. */
3225 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 * type KLASS from iargs[1] to iargs[0] (SIZE bytes, ALIGN alignment).
 * Bails out (presumably returning FALSE, per missing lines) when alignment
 * is below pointer size or the type is too large for the 32-bit wb bitmap;
 * falls back to the mono_gc_wbarrier_value_copy_bitmap icall above 5 words.
 * Word-sized chunks get barriers per the bitmap; the sub-word tail is
 * copied with 4/2/1-byte loads/stores (those can't hold references).
 */
3229 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3231 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3232 unsigned need_wb = 0;
3237 /*types with references can't have alignment smaller than sizeof(void*) */
3238 if (align < SIZEOF_VOID_P)
3241 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3242 if (size > 32 * SIZEOF_VOID_P)
3245 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3247 /* We don't unroll more than 5 stores to avoid code bloat. */
3248 if (size > 5 * SIZEOF_VOID_P) {
3249 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3250 size += (SIZEOF_VOID_P - 1);
3251 size &= ~(SIZEOF_VOID_P - 1);
3253 EMIT_NEW_ICONST (cfg, iargs [2], size);
3254 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3255 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3259 destreg = iargs [0]->dreg;
3260 srcreg = iargs [1]->dreg;
3263 dest_ptr_reg = alloc_preg (cfg);
3264 tmp_reg = alloc_preg (cfg);
3267 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy with per-word write barriers as needed. */
3269 while (size >= SIZEOF_VOID_P) {
3270 MonoInst *load_inst;
3271 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3272 load_inst->dreg = tmp_reg;
3273 load_inst->inst_basereg = srcreg;
3274 load_inst->inst_offset = offset;
3275 MONO_ADD_INS (cfg->cbb, load_inst);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3280 emit_write_barrier (cfg, iargs [0], load_inst);
3282 offset += SIZEOF_VOID_P;
3283 size -= SIZEOF_VOID_P;
3286 /*tmp += sizeof (void*)*/
3287 if (size >= SIZEOF_VOID_P) {
3288 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3289 MONO_ADD_INS (cfg->cbb, iargs [0]);
3293 /* Those cannot be references since size < sizeof (void*) */
3295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3309 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3319 * Emit code to copy a valuetype of type @klass whose address is stored in
3320 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Strategy, in order: for gsharedvt types, fetch size/memcpy from the
 * rgctx; when barriers are needed (reference-carrying type, non-stack
 * dest, !native), use the wb-aware intrinsic copy or the value_copy
 * icalls; otherwise an inline mini_emit_memcpy for small sizes, falling
 * back to corlib's memcpy helper (via calli for gsharedvt).
 */
3323 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3325 MonoInst *iargs [4];
3328 MonoMethod *memcpy_method;
3329 MonoInst *size_ins = NULL;
3330 MonoInst *memcpy_ins = NULL;
3334 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3337 * This check breaks with spilled vars... need to handle it during verification anyway.
3338 * g_assert (klass && klass == src->klass && klass == dest->klass);
3341 if (mini_is_gsharedvt_klass (klass)) {
3343 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3344 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3348 n = mono_class_native_size (klass, &align);
3350 n = mono_class_value_size (klass, &align);
3352 /* if native is true there should be no references in the struct */
3353 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3354 /* Avoid barriers when storing to the stack */
3355 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3356 (dest->opcode == OP_LDADDR))) {
3362 context_used = mini_class_check_context_used (cfg, klass);
3364 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3365 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3367 } else if (context_used) {
3368 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3370 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3371 if (!cfg->compile_aot)
3372 mono_class_compute_gc_descriptor (klass);
3376 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3378 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline copy when small, else call the memcpy helper. */
3383 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3384 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3385 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3390 iargs [2] = size_ins;
3392 EMIT_NEW_ICONST (cfg, iargs [2], n);
3394 memcpy_method = get_memcpy_method ();
3396 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3398 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) corlib's String.memset helper used for
 * valuetype initialization. Aborts if the running corlib lacks it.
 */
3403 get_memset_method (void)
3405 static MonoMethod *memset_method = NULL;
3406 if (!memset_method) {
3407 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3409 g_error ("Old corlib found. Install a new one");
3411 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type KLASS at DEST->dreg.
 * gsharedvt types call the rgctx-provided bzero through a calli (size from
 * the rgctx); otherwise small types get an inline mini_emit_memset and
 * larger ones a call to corlib's memset helper.
 */
3415 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3417 MonoInst *iargs [3];
3420 MonoMethod *memset_method;
3421 MonoInst *size_ins = NULL;
3422 MonoInst *bzero_ins = NULL;
3423 static MonoMethod *bzero_method;
3425 /* FIXME: Optimize this for the case when dest is an LDADDR */
3426 mono_class_init (klass);
3427 if (mini_is_gsharedvt_klass (klass)) {
3428 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3429 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* bzero_method is a lazily-resolved static cache, like get_memset_method. */
3431 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3432 g_assert (bzero_method);
3434 iargs [1] = size_ins;
3435 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3439 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3441 n = mono_class_value_size (klass, &align);
3443 if (n <= sizeof (gpointer) * 8) {
3444 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3447 memset_method = get_memset_method ();
3449 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3450 EMIT_NEW_ICONST (cfg, iargs [2], n);
3451 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3458 * Emit IR to return either the this pointer for instance method,
3459 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context source for the current
 * method: 'this' for instance methods of reference types; the mrgctx
 * variable for generic methods; the vtable variable for static/valuetype
 * methods (loading class_vtable out of the mrgctx when both apply);
 * otherwise (per missing lines) the vtable loaded from 'this'.
 */
3462 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3464 MonoInst *this_ins = NULL;
3466 g_assert (cfg->gshared);
3468 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3469 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3470 !method->klass->valuetype)
3471 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3473 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3474 MonoInst *mrgctx_loc, *mrgctx_var;
3476 g_assert (!this_ins);
3477 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3479 mrgctx_loc = mono_get_vtable_var (cfg);
3480 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3483 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3484 MonoInst *vtable_loc, *vtable_var;
3486 g_assert (!this_ins);
3488 vtable_loc = mono_get_vtable_var (cfg);
3489 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3491 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; the vtable is a field of it. */
3492 MonoInst *mrgctx_var = vtable_var;
3495 vtable_reg = alloc_preg (cfg);
3496 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3497 vtable_var->type = STACK_PTR;
/* Instance-method fallback: load the vtable out of 'this'. */
3505 vtable_reg = alloc_preg (cfg);
3506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) and fill an rgctx-entry patch descriptor:
 * the requesting METHOD, whether the lookup goes through an mrgctx, the
 * embedded MonoJumpInfo describing the patch target, and the rgctx info
 * slot type. Mempool-owned; never freed individually.
 */
3511 static MonoJumpInfoRgctxEntry *
3512 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3514 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3515 res->method = method;
3516 res->in_mrgctx = in_mrgctx;
3517 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3518 res->data->type = patch_type;
3519 res->data->data.target = patch_data;
3520 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fetch of rgctx slot ENTRY from RGCTX. One path (AOT,
 * per the slot-not-constant FIXME) simply calls the fill icalls; the other
 * open-codes the fast path: walk the chained rgctx arrays to the slot's
 * depth, null-checking each link, load the slot, and if anything along the
 * way is null fall through to is_null_bb which calls
 * mono_fill_method_rgctx/mono_fill_class_rgctx to populate it. Both paths
 * converge on end_bb with the value in res_reg.
 * NOTE(review): several guard/brace lines are missing from this excerpt.
 */
3525 static inline MonoInst*
3526 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3528 MonoInst *args [16];
3531 // FIXME: No fastpath since the slot is not a compile time constant
3533 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3534 if (entry->in_mrgctx)
3535 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3537 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3541 * FIXME: This can be called during decompose, which is a problem since it creates
3543 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3545 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3547 MonoBasicBlock *is_null_bb, *end_bb;
3548 MonoInst *res, *ins, *call;
3551 slot = mini_get_rgctx_entry_slot (entry);
3553 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3554 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots sit after the fixed MonoMethodRuntimeGenericContext header. */
3556 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how deep in the chained arrays the slot lives. */
3557 for (depth = 0; ; ++depth) {
3558 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3560 if (index < size - 1)
3565 NEW_BBLOCK (cfg, end_bb);
3566 NEW_BBLOCK (cfg, is_null_bb);
3569 rgctx_reg = rgctx->dreg;
3571 rgctx_reg = alloc_preg (cfg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3574 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3575 NEW_BBLOCK (cfg, is_null_bb);
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3581 for (i = 0; i < depth; ++i) {
3582 int array_reg = alloc_preg (cfg);
3584 /* load ptr to next array */
3585 if (mrgctx && i == 0)
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3589 rgctx_reg = array_reg;
3590 /* is the ptr null? */
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3592 /* if yes, jump to actual trampoline */
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Slot 0 of each array is the link to the next array, hence index + 1. */
3597 val_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3599 /* is the slot null? */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3601 /* if yes, jump to actual trampoline */
3602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3605 res_reg = alloc_preg (cfg);
3606 MONO_INST_NEW (cfg, ins, OP_MOVE);
3607 ins->dreg = res_reg;
3608 ins->sreg1 = val_reg;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: populate the slot via the fill icalls, result into res_reg. */
3614 MONO_START_BB (cfg, is_null_bb);
3616 EMIT_NEW_ICONST (cfg, args [1], index);
3618 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3620 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3621 MONO_INST_NEW (cfg, ins, OP_MOVE);
3622 ins->dreg = res_reg;
3623 ins->sreg1 = call->dreg;
3624 MONO_ADD_INS (cfg->cbb, ins);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3627 MONO_START_BB (cfg, end_bb);
3636  * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Dispatcher: either inline the fetch (emit_rgctx_fetch_inline) or emit an
 * absolute call to the lazy-fetch trampoline with ENTRY as patch data.
 * NOTE(review): the condition selecting between the two paths is elided in
 * this excerpt.
 */
3639 static inline MonoInst*
3640 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3643 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3645 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE info (e.g. vtable, klass) for KLASS from
 * the runtime generic context of the current method.
 */
3649 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3650 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the RGCTX_TYPE info for the signature SIG from the
 * runtime generic context.  Same shape as emit_get_rgctx_klass, but keyed
 * by MONO_PATCH_INFO_SIGNATURE.
 */
3659 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3660 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3662 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3663 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch rgctx info for a gsharedvt call described by the
 * (SIG, CMETHOD) pair.  The pair is packaged into a mempool-allocated
 * MonoJumpInfoGSharedVtCall which serves as the rgctx entry's data.
 */
3669 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3670 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3672 MonoJumpInfoGSharedVtCall *call_info;
3673 MonoJumpInfoRgctxEntry *entry;
3676 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3677 call_info->sig = sig;
3678 call_info->method = cmethod;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687  * emit_get_rgctx_virt_method:
3689  *   Return data for method VIRT_METHOD for a receiver of type KLASS.
/* The (KLASS, VIRT_METHOD) pair is packaged into a MonoJumpInfoVirtMethod
 * and fetched through the rgctx like the other emit_get_rgctx_* helpers. */
3692 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3693 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3695 MonoJumpInfoVirtMethod *info;
3696 MonoJumpInfoRgctxEntry *entry;
3699 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3700 info->klass = klass;
3701 info->method = virt_method;
3703 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3704 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt info (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO)
 * for CMETHOD from the rgctx, using INFO as the entry's data.
 */
3710 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3711 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3713 MonoJumpInfoRgctxEntry *entry;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
3723  * emit_get_rgctx_method:
3725  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3726  * normal constants, else emit a load from the rgctx.
3729 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3730 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at compile time, so a plain constant suffices. */
3732 if (!context_used) {
3735 switch (rgctx_type) {
3736 case MONO_RGCTX_INFO_METHOD:
3737 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3739 case MONO_RGCTX_INFO_METHOD_RGCTX:
3740 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two rgctx_type values above are supported for the constant path. */
3743 g_assert_not_reached ();
/* Shared case: load the value from the rgctx at runtime. */
3746 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3747 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE info for FIELD from the runtime generic
 * context (entry keyed by MONO_PATCH_INFO_FIELD).
 */
3754 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3755 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3757 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3758 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3760 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the current method's
 * gsharedvt info template table, registering a new entry if it is not
 * already present.  The entries array grows by doubling (starting at 16).
 */
3764 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3766 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3767 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing slot if one matches; LOCAL_OFFSET entries are never deduplicated. */
3772 for (i = 0; i < info->num_entries; ++i) {
3773 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3775 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full; old entries are copied, memory comes from the cfg mempool. */
3779 if (info->num_entries == info->count_entries) {
3780 MonoRuntimeGenericContextInfoTemplate *new_entries;
3781 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3783 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3785 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3786 info->entries = new_entries;
3787 info->count_entries = new_count_entries;
/* Append the new template and return its index. */
3790 idx = info->num_entries;
3791 template_ = &info->entries [idx];
3792 template_->info_type = rgctx_type;
3793 template_->data = data;
3795 info->num_entries ++;
3801  * emit_get_gsharedvt_info:
3803  *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3806 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot in the method's gsharedvt info table ... */
3811 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3812 /* Load info->entries [idx] */
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3820 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3822 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3826  * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor / class initialization for
 * KLASS if it has not run yet.  The vtable argument comes either from the
 * rgctx (shared code) or is emitted as a vtable constant.  When the backend
 * supports it, a single OP_GENERIC_CLASS_INIT opcode is used; otherwise an
 * explicit "already initialized?" bitfield check guards a call to the
 * mono_generic_class_init icall.
 */
3829 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3831 MonoInst *vtable_arg;
3834 context_used = mini_class_check_context_used (cfg, klass);
3837 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3838 klass, MONO_RGCTX_INFO_VTABLE);
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3844 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3847 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3851 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3852 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3854 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3855 ins->sreg1 = vtable_arg->dreg;
3856 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback path: test MonoVTable.initialized bit, skip the icall when set. */
3858 static int byte_offset = -1;
3859 static guint8 bitmask;
3860 int bits_reg, inited_reg;
3861 MonoBasicBlock *inited_bb;
3862 MonoInst *args [16];
/* Locate the 'initialized' bitfield lazily; cached in function-local statics. */
3864 if (byte_offset < 0)
3865 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3867 bits_reg = alloc_ireg (cfg);
3868 inited_reg = alloc_ireg (cfg);
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3873 NEW_BBLOCK (cfg, inited_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3878 args [0] = vtable_arg;
3879 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3881 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger/single-step marker) at IL offset
 * IP - header->code, but only when sequence points are enabled and METHOD
 * is the method actually being compiled (not an inlined callee).
 */
3886 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3890 if (cfg->gen_seq_points && cfg->method == method) {
3891 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3893 ins->flags |= MONO_INST_NONEMPTY_STACK;
3894 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source class
 * (from the object's vtable) and destination class of an upcoming cast in
 * the thread's MonoJitTlsData (class_cast_from / class_cast_to), so a
 * failing cast can produce a detailed exception message.  NULL_CHECK
 * presumably guards the emission of a null test on OBJ_REG — the condition
 * line itself is elided in this excerpt.
 */
3899 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3901 if (mini_get_debug_options ()->better_cast_details) {
3902 int vtable_reg = alloc_preg (cfg);
3903 int klass_reg = alloc_preg (cfg);
3904 MonoBasicBlock *is_null_bb = NULL;
3906 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for null objects. */
3909 NEW_BBLOCK (cfg, is_null_bb);
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3915 tls_get = mono_get_jit_tls_intrinsic (cfg);
3917 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record the object's dynamic class as the cast source. */
3921 MONO_ADD_INS (cfg->cbb, tls_get);
3922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The target class may only be known via the rgctx in shared code. */
3927 context_used = mini_class_check_context_used (cfg, klass);
3929 MonoInst *class_ins;
3931 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3932 to_klass_reg = class_ins->dreg;
3934 to_klass_reg = alloc_preg (cfg);
3935 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3940 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: clears the recorded cast-source class
 * in TLS after the cast has been emitted (clearing 'from' alone is enough
 * to mark the record invalid).
 */
3945 reset_cast_details (MonoCompile *cfg)
3947 /* Reset the variables holding the cast details */
3948 if (mini_get_debug_options ()->better_cast_details) {
3949 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3951 MONO_ADD_INS (cfg->cbb, tls_get);
3952 /* It is enough to reset the from field */
3953 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3958  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR checking that OBJ's runtime type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for covariant array
 * stores).  The comparison strategy depends on compilation mode:
 * MONO_OPT_SHARED compares MonoClass pointers via a runtime constant,
 * shared-generic code compares vtables fetched from the rgctx, AOT compares
 * against a vtable constant, and plain JIT compares against the vtable
 * pointer immediate.
 */
3961 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3963 int vtable_reg = alloc_preg (cfg);
3966 context_used = mini_class_check_context_used (cfg, array_class);
3968 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3970 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 int class_reg = alloc_preg (cfg);
3976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3977 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3978 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3979 } else if (context_used) {
3980 MonoInst *vtable_ins;
3982 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3985 if (cfg->compile_aot) {
/* NOTE(review): the error-handling after a failed mono_class_vtable is elided here. */
3989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3991 vt_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3993 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3996 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4002 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4004 reset_cast_details (cfg);
4008  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4009  * generic code is generated.
/*
 * Calls Nullable<T>.Unbox on VAL.  In shared code the address of the method
 * is fetched from the rgctx and an indirect call is emitted (emit_llvmonly_calli
 * for llvm-only gsharedvt, mono_emit_calli otherwise); in non-shared code a
 * direct call is made, passing the vtable as the rgctx argument when required.
 */
4012 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4014 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4017 MonoInst *rgctx, *addr;
4019 /* FIXME: What if the class is shared? We might not
4020 have to get the address of the method from the
4022 addr = emit_get_rgctx_method (cfg, context_used, method,
4023 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4024 if (cfg->llvm_only && cfg->gsharedvt) {
4025 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4027 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4029 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable. */
4032 gboolean pass_vtable, pass_mrgctx;
4033 MonoInst *rgctx_arg = NULL;
4035 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4036 g_assert (!pass_mrgctx);
4039 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4042 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4045 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to a value of
 * type KLASS: verify the object's element class matches KLASS (throwing
 * InvalidCastException otherwise), then compute the address of the boxed
 * payload (obj + sizeof (MonoObject)) as a STACK_MP value.
 */
4050 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4054 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4055 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4056 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4057 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4059 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4060 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4063 /* FIXME: generics */
4064 g_assert (klass->rank == 0);
/* Arrays can never unbox to a non-array klass. */
4067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4068 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: the expected element class comes from the rgctx. */
4074 MonoInst *element_class;
4076 /* This assertion is from the unboxcast insn */
4077 g_assert (klass->rank == 0);
4079 element_class = emit_get_rgctx_klass (cfg, context_used,
4080 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4082 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4083 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: compare against the compile-time element class. */
4085 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4086 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4087 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the unboxed value. */
4090 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4091 MONO_ADD_INS (cfg->cbb, add);
4092 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ to type KLASS when KLASS is gsharedvt, i.e. its concrete
 * layout is only known at runtime.  Emits a three-way runtime branch on the
 * class's box type (MONO_RGCTX_INFO_CLASS_BOX_TYPE):
 *   - vtype: address is obj + sizeof (MonoObject);
 *   - reference: the reference is spilled to a temporary whose address is used;
 *   - nullable: Nullable<T>.Unbox is invoked through an rgctx-fetched address.
 * All three paths leave an address in addr_reg, from which the final value
 * of type KLASS is loaded.
 */
4099 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4101 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4102 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4106 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Cast check first: castclass_unbox throws on mismatch. */
4112 args [1] = klass_inst;
4115 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4117 NEW_BBLOCK (cfg, is_ref_bb);
4118 NEW_BBLOCK (cfg, is_nullable_bb);
4119 NEW_BBLOCK (cfg, end_bb);
4120 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4128 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Default (vtype) path: payload follows the object header. */
4132 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4133 MONO_ADD_INS (cfg->cbb, addr);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_ref_bb);
4140 /* Save the ref to a temporary */
4141 dreg = alloc_ireg (cfg);
4142 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4143 addr->dreg = addr_reg;
4144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4148 MONO_START_BB (cfg, is_nullable_bb);
4151 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4152 MonoInst *unbox_call;
4153 MonoMethodSignature *unbox_sig;
/* Build an ad-hoc signature: KLASS Unbox (object); cannot be constructed normally at JIT time. */
4155 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4156 unbox_sig->ret = &klass->byval_arg;
4157 unbox_sig->param_count = 1;
4158 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4161 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4163 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4165 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4166 addr->dreg = addr_reg;
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4172 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value of type KLASS from the address computed above. */
4175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4181  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an object of type KLASS (FOR_BOX set when the
 * allocation is for a box operation).  Visible strategies, in order:
 *   - shared-generic code: fetch klass/vtable from the rgctx and call a
 *     managed allocator or the ves_icall_object_new* icalls;
 *   - MONO_OPT_SHARED: pass the domain and klass to ves_icall_object_new;
 *   - AOT out-of-line corlib allocations: specialized helper keyed by the
 *     type token to avoid relocations;
 *   - otherwise: managed allocator with a vtable constant, or the
 *     allocation function chosen by mono_class_get_allocation_ftn.
 * NOTE(review): some branch and declaration lines are elided in this excerpt.
 */
4184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4186 MonoInst *iargs [2];
4191 MonoRgctxInfoType rgctx_info;
4192 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-determined instance size. */
4193 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4195 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4197 if (cfg->opt & MONO_OPT_SHARED)
4198 rgctx_info = MONO_RGCTX_INFO_KLASS;
4200 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4201 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4203 if (cfg->opt & MONO_OPT_SHARED) {
4204 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4206 alloc_ftn = ves_icall_object_new;
4209 alloc_ftn = ves_icall_object_new_specific;
4212 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4213 if (known_instance_size) {
4214 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
4215 if (size < sizeof (MonoObject))
4216 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4218 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4220 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4223 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4226 if (cfg->opt & MONO_OPT_SHARED) {
4227 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4228 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4230 alloc_ftn = ves_icall_object_new;
4231 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4232 /* This happens often in argument checking code, eg. throw new FooException... */
4233 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4234 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4235 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4237 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4238 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed -> surface a type-load error on the cfg. */
4242 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4243 cfg->exception_ptr = klass;
4247 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4249 if (managed_alloc) {
4250 int size = mono_class_instance_size (klass);
4251 if (size < sizeof (MonoObject))
4252 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4254 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4255 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4256 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4258 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: allocator wants the size in machine words as first argument. */
4260 guint32 lw = vtable->klass->instance_size;
4261 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4262 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4263 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4266 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4270 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4274  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL (a value of type KLASS) into an object reference.
 * Special cases visible here:
 *   - Nullable<T>: delegate to Nullable<T>.Box, via an rgctx-fetched
 *     address in shared code or a direct call otherwise;
 *   - gsharedvt KLASS: runtime three-way branch on the class's box type
 *     (vtype / reference / nullable), mirroring handle_unbox_gsharedvt;
 *   - plain vtype: allocate with handle_alloc and store VAL after the
 *     object header.
 */
4277 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4279 MonoInst *alloc, *ins;
4281 if (mono_class_is_nullable (klass)) {
4282 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4285 if (cfg->llvm_only && cfg->gsharedvt) {
4286 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4287 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4288 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4290 /* FIXME: What if the class is shared? We might not
4291 have to get the method address from the RGCTX. */
4292 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4293 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4294 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4296 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable<T>.Box: direct call, optionally passing the vtable. */
4299 gboolean pass_vtable, pass_mrgctx;
4300 MonoInst *rgctx_arg = NULL;
4302 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4303 g_assert (!pass_mrgctx);
4306 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4309 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4312 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4316 if (mini_is_gsharedvt_klass (klass)) {
4317 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4318 MonoInst *res, *is_ref, *src_var, *addr;
4321 dreg = alloc_ireg (cfg);
4323 NEW_BBLOCK (cfg, is_ref_bb);
4324 NEW_BBLOCK (cfg, is_nullable_bb);
4325 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of KLASS. */
4326 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype path: allocate and copy the value after the header. */
4334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4338 ins->opcode = OP_STOREV_MEMBASE;
4340 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4341 res->type = STACK_OBJ;
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4346 MONO_START_BB (cfg, is_ref_bb);
4348 /* val is a vtype, so has to load the value manually */
4349 src_var = get_vreg_to_inst (cfg, val->dreg);
4351 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4352 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4357 MONO_START_BB (cfg, is_nullable_bb);
4360 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4361 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4363 MonoMethodSignature *box_sig;
4366 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4367 * construct that method at JIT time, so have to do things by hand.
4369 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4370 box_sig->ret = &mono_defaults.object_class->byval_arg;
4371 box_sig->param_count = 1;
4372 box_sig->params [0] = &klass->byval_arg;
4375 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4377 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4378 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4379 res->type = STACK_OBJ;
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4385 MONO_START_BB (cfg, end_bb);
/* Non-gsharedvt path: straightforward allocate + store. */
4389 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instantiation (or, in shared code, an
 * open generic definition) with at least one covariant/contravariant type
 * parameter instantiated with a reference type.  Such classes need the
 * slower variance-aware cast paths.
 */
4399 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4402 MonoGenericContainer *container;
4403 MonoGenericInst *ginst;
4405 if (klass->generic_class) {
4406 container = klass->generic_class->container_class->generic_container;
4407 ginst = klass->generic_class->context.class_inst;
4408 } else if (klass->generic_container && context_used) {
4409 container = klass->generic_container;
4410 ginst = container->context.class_inst;
/* Scan each type argument: variant parameter + reference instantiation -> TRUE. */
4415 for (i = 0; i < container->type_argc; ++i) {
4417 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4419 type = ginst->type_argv [i];
4420 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib type names whose icalls may be called directly. */
4426 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD can be called directly instead of
 * through a wrapper.  Only icalls which never raise exceptions qualify;
 * currently approximated by a whitelist of corlib types plus Math.
 */
4429 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4431 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4432 if (!direct_icalls_enabled (cfg))
4436 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4437 * Whitelist a few icalls for now.
4439 if (!direct_icall_type_hash) {
4440 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4442 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4443 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4444 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4445 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the pointer becomes visible to other threads. */
4446 mono_memory_barrier ();
4447 direct_icall_type_hash = h;
4450 if (cmethod->klass == mono_defaults.math_class)
4452 /* No locking needed */
4453 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* TRUE when KLASS needs the slow (cache-based) isinst/castclass path:
 * interfaces, arrays, nullables, MarshalByRef, sealed classes, and
 * generic type variables all defeat the simple vtable/klass compare. */
4458 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * bracketed by save/reset of the cast details used for better cast
 * exception messages.
 */
4461 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4463 MonoMethod *mono_castclass;
4466 mono_castclass = mono_marshal_get_castclass_with_cache ();
4468 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4469 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4470 reset_cast_details (cfg);
/* Return a per-call-site unique id for a CASTCLASS_CACHE patch:
 * high 16 bits identify the method, low bits a per-method counter. */
4476 get_castclass_cache_idx (MonoCompile *cfg)
4478 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4479 cfg->castclass_cache_index ++;
4480 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant of the cache-based castclass: KLASS is known at
 * compile time, so the class argument is a constant and the cache slot is
 * identified by a fresh call-site index.
 */
4484 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4493 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4496 idx = get_castclass_cache_idx (cfg);
4497 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4499 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4500 return emit_castclass_with_cache (cfg, klass, args);
4504  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' opcode: check that SRC is an instance
 * of KLASS, throwing InvalidCastException otherwise.  Fast/slow path
 * selection visible here:
 *   - variant generic arguments -> cache-based wrapper;
 *   - non-shared MarshalByRef / interface -> inlined castclass wrapper;
 *   - shared complex classes -> cache-based wrapper with rgctx-fetched cache;
 *   - otherwise -> inline vtable/klass comparisons with a null-object
 *     shortcut, wrapped in save/reset_cast_details.
 * NOTE(review): several branch/exit lines are elided in this excerpt.
 */
4507 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4509 MonoBasicBlock *is_null_bb;
4510 int obj_reg = src->dreg;
4511 int vtable_reg = alloc_preg (cfg);
4513 MonoInst *klass_inst = NULL, *res;
4515 context_used = mini_class_check_context_used (cfg, klass);
4517 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4518 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4519 (*inline_costs) += 2;
4521 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4522 MonoMethod *mono_castclass;
4523 MonoInst *iargs [1];
4526 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass wrapper body directly into the caller. */
4529 save_cast_details (cfg, klass, src->dreg, TRUE);
4530 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4531 iargs, ip, cfg->real_offset, TRUE);
4532 reset_cast_details (cfg);
4533 CHECK_CFG_EXCEPTION;
4534 g_assert (costs > 0);
4536 cfg->real_offset += 5;
4538 (*inline_costs) += costs;
4546 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4547 MonoInst *cache_ins;
4549 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4554 /* klass - it's the second element of the cache entry*/
4555 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4558 args [2] = cache_ins;
4560 return emit_castclass_with_cache (cfg, klass, args);
4563 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects pass the cast unconditionally. */
4566 NEW_BBLOCK (cfg, is_null_bb);
4568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4571 save_cast_details (cfg, klass, obj_reg, FALSE);
4573 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4577 int klass_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single exact-type comparison. */
4581 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4582 /* the remoting code is broken, access the class for now */
4583 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4584 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4587 cfg->exception_ptr = klass;
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4595 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4598 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4602 MONO_START_BB (cfg, is_null_bb);
4604 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL `isinst` test of SRC against KLASS.
 * The result register holds SRC when the object is an instance of KLASS
 * and NULL otherwise.  CONTEXT_USED is non-zero when KLASS must be
 * fetched through the runtime generic context (gshared code).
 *
 * NOTE(review): this extraction is missing lines (the embedded original
 * line numbers are non-contiguous), so some branches/braces/returns of
 * this function are not visible here.
 */
4613 * Returns NULL and sets the cfg exception on error.
4616 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4619 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4620 int obj_reg = src->dreg;
4621 int vtable_reg = alloc_preg (cfg);
4622 int res_reg = alloc_ireg_ref (cfg);
4623 MonoInst *klass_inst = NULL;
/* Complex cases (variant generic interfaces etc.) go through the managed
 * isinst-with-cache helper instead of open-coded checks. */
4628 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4629 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4630 MonoInst *cache_ins;
4632 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4637 /* klass - it's the second element of the cache entry*/
4638 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins;
4643 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4646 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4649 NEW_BBLOCK (cfg, is_null_bb);
4650 NEW_BBLOCK (cfg, false_bb);
4651 NEW_BBLOCK (cfg, end_bb);
4653 /* Do the assignment at the beginning, so the other assignment can be if converted */
4654 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4655 ins->type = STACK_OBJ;
/* A NULL input short-circuits to is_null_bb (result = input = NULL). */
4658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4664 g_assert (!context_used);
4665 /* the is_null_bb target simply copies the input register to the output */
4666 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4668 int klass_reg = alloc_preg (cfg);
/* Array case (presumably — the enclosing condition is not visible here):
 * check the rank, then the element class. */
4671 int rank_reg = alloc_preg (cfg);
4672 int eclass_reg = alloc_preg (cfg);
4674 g_assert (!context_used);
4675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element classes with special array-compatibility rules are handled
 * case by case: object, Enum's parent, Enum, interface element types. */
4680 if (klass->cast_class == mono_defaults.object_class) {
4681 int parent_reg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4683 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4686 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4688 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4690 } else if (klass->cast_class == mono_defaults.enum_class) {
4691 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4696 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4697 /* Check that the object is a vector too */
4698 int bounds_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4704 /* the is_null_bb target simply copies the input register to the output */
4705 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4707 } else if (mono_class_is_nullable (klass)) {
4708 g_assert (!context_used);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4710 /* the is_null_bb target simply copies the input register to the output */
4711 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4713 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4714 g_assert (!context_used);
4715 /* the remoting code is broken, access the class for now */
4716 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions that some remoting fixes were due.*/
4717 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4719 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4720 cfg->exception_ptr = klass;
/* Sealed class: a single vtable (or klass) pointer equality check suffices. */
4723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4732 /* the is_null_bb target simply copies the input register to the output */
4733 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the test failed, so the result register is set to NULL. */
4738 MONO_START_BB (cfg, false_bb);
4740 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4743 MONO_START_BB (cfg, is_null_bb);
4745 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CEE_MONO_CISINST opcode: an `isinst` variant
 * that is remoting/transparent-proxy aware.  The result is an integer
 * (0/1/2) rather than an object reference — see the comment below.
 *
 * NOTE(review): this extraction is missing lines (non-contiguous embedded
 * line numbers); #else/#endif lines of the DISABLE_REMOTING regions and
 * the final return are not visible here.
 */
4751 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4753 /* This opcode takes as input an object reference and a class, and returns:
4754 0) if the object is an instance of the class,
4755 1) if the object is not an instance of the class,
4756 2) if the object is a proxy whose type cannot be determined */
4759 #ifndef DISABLE_REMOTING
4760 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4762 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4764 int obj_reg = src->dreg;
4765 int dreg = alloc_ireg (cfg);
4767 #ifndef DISABLE_REMOTING
4768 int klass_reg = alloc_preg (cfg);
4771 NEW_BBLOCK (cfg, true_bb);
4772 NEW_BBLOCK (cfg, false_bb);
4773 NEW_BBLOCK (cfg, end_bb);
4774 #ifndef DISABLE_REMOTING
4775 NEW_BBLOCK (cfg, false2_bb);
4776 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL input: not an instance -> result 1. */
4779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4782 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4783 #ifndef DISABLE_REMOTING
4784 NEW_BBLOCK (cfg, interface_fail_bb);
4787 tmp_reg = alloc_preg (cfg);
4788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4789 #ifndef DISABLE_REMOTING
/* Interface test failed: fall through to check whether the object is a
 * transparent proxy whose interface set cannot be decided statically. */
4790 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4791 MONO_START_BB (cfg, interface_fail_bb);
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4794 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4796 tmp_reg = alloc_preg (cfg);
4797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4801 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4804 #ifndef DISABLE_REMOTING
/* Non-interface class: load the vtable/klass and, with remoting enabled,
 * detect transparent proxies and test against their remote proxy_class. */
4805 tmp_reg = alloc_preg (cfg);
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4809 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4810 tmp_reg = alloc_preg (cfg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4819 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4820 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4822 MONO_START_BB (cfg, no_proxy_bb);
4824 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4826 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
4830 MONO_START_BB (cfg, false_bb);
4832 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4835 #ifndef DISABLE_REMOTING
4836 MONO_START_BB (cfg, false2_bb);
4838 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4839 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4842 MONO_START_BB (cfg, true_bb);
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4846 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST-shaped instruction typed I4. */
4849 MONO_INST_NEW (cfg, ins, OP_ICONST);
4851 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CEE_MONO_CCASTCLASS opcode: a remoting-aware
 * `castclass` variant returning an integer (see the comment below) and
 * throwing InvalidCastException on definite failure.
 *
 * NOTE(review): this extraction is missing lines (non-contiguous embedded
 * line numbers); #else/#endif lines and the final return are not visible.
 */
4857 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4859 /* This opcode takes as input an object reference and a class, and returns:
4860 0) if the object is an instance of the class,
4861 1) if the object is a proxy whose type cannot be determined
4862 an InvalidCastException exception is thrown otherwise*/
4865 #ifndef DISABLE_REMOTING
4866 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4868 MonoBasicBlock *ok_result_bb;
4870 int obj_reg = src->dreg;
4871 int dreg = alloc_ireg (cfg);
4872 int tmp_reg = alloc_preg (cfg);
4874 #ifndef DISABLE_REMOTING
4875 int klass_reg = alloc_preg (cfg);
4876 NEW_BBLOCK (cfg, end_bb);
4879 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL casts successfully: go straight to the 0 result. */
4881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4882 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failed cast can produce a descriptive message. */
4884 save_cast_details (cfg, klass, obj_reg, FALSE);
4886 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4887 #ifndef DISABLE_REMOTING
4888 NEW_BBLOCK (cfg, interface_fail_bb);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4891 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4892 MONO_START_BB (cfg, interface_fail_bb);
4893 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* If the object is not a transparent proxy the interface cast has
 * definitively failed -> throw. */
4895 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4897 tmp_reg = alloc_preg (cfg);
4898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4900 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (undecidable at JIT time). */
4902 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4906 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4910 #ifndef DISABLE_REMOTING
4911 NEW_BBLOCK (cfg, no_proxy_bb);
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4915 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4917 tmp_reg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4921 tmp_reg = alloc_preg (cfg);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4926 NEW_BBLOCK (cfg, fail_1_bb);
4928 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4930 MONO_START_BB (cfg, fail_1_bb);
4932 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4935 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: an ordinary castclass check (throws on failure). */
4937 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4939 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4943 MONO_START_BB (cfg, ok_result_bb);
4945 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4947 #ifndef DISABLE_REMOTING
4948 MONO_START_BB (cfg, end_bb);
4952 MONO_INST_NEW (cfg, ins, OP_ICONST);
4954 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag: loads the enum value of
 * ENUM_THIS (of enum class KLASS), ANDs it with ENUM_FLAG, and compares
 * the result back against ENUM_FLAG, producing an I4 boolean.
 * `is_i4` (declared in lines not visible here) selects 32- vs 64-bit
 * opcodes based on the underlying enum type.
 *
 * NOTE(review): this extraction is missing lines; the switch body that
 * computes `is_i4` and the final return are not visible.
 */
4959 static G_GNUC_UNUSED MonoInst*
4960 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4962 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4963 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4966 switch (enum_type->type) {
4969 #if SIZEOF_REGISTER == 8
4981 MonoInst *load, *and_, *cmp, *ceq;
4982 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4984 int dest_reg = alloc_ireg (cfg);
/* (this & flag) == flag */
4986 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4987 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4988 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4989 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4991 ceq->type = STACK_I4;
/* Decompose for backends that lack these composite opcodes. */
4994 load = mono_decompose_opcode (cfg, load);
4995 and_ = mono_decompose_opcode (cfg, and_);
4996 cmp = mono_decompose_opcode (cfg, cmp);
4997 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of class KLASS bound to METHOD with
 * receiver TARGET, inlining the work normally done by mono_delegate_ctor ():
 * it stores the target (with a write barrier if needed), the method, an
 * optional per-domain compiled-code slot, and the invoke trampoline.
 * VIRTUAL_ selects the virtual-delegate trampoline path.
 *
 * NOTE(review): this extraction is missing lines (non-contiguous embedded
 * line numbers); some else-branches and the final return are not visible.
 */
5005 * Returns NULL and sets the cfg exception on error.
5007 static G_GNUC_UNUSED MonoInst*
5008 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5012 gpointer trampoline;
5013 MonoInst *obj, *method_ins, *tramp_ins;
5017 if (virtual_ && !cfg->llvm_only) {
5018 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5021 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5025 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5029 /* Inline the contents of mono_delegate_ctor */
5031 /* Set target field */
5032 /* Optimize away setting of NULL target */
5033 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
5035 if (cfg->gen_write_barriers) {
5036 dreg = alloc_preg (cfg);
5037 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5038 emit_write_barrier (cfg, ptr, target);
5042 /* Set method field */
5043 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5047 * To avoid looking up the compiled code belonging to the target method
5048 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5049 * store it, and we fill it after the method has been compiled.
5051 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5052 MonoInst *code_slot_ins;
5055 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-method code slot under the domain lock. */
5057 domain = mono_domain_get ();
5058 mono_domain_lock (domain);
5059 if (!domain_jit_info (domain)->method_code_hash)
5060 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5061 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5063 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5064 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5066 mono_domain_unlock (domain);
5068 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode initializes the delegate through an icall instead of a
 * trampoline. */
5073 if (cfg->llvm_only) {
5074 MonoInst *args [16];
5079 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5080 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5083 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
5089 if (cfg->compile_aot) {
5090 MonoDelegateClassMethodPair *del_tramp;
5092 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5093 del_tramp->klass = klass;
5094 del_tramp->method = context_used ? NULL : method;
5095 del_tramp->is_virtual = virtual_;
5096 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5099 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5101 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5102 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5105 /* Set invoke_impl field */
5107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path (presumably — branch structure not fully visible):
 * read invoke_impl/method_ptr out of the MonoDelegateTrampInfo. */
5109 dreg = alloc_preg (cfg);
5110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5113 dreg = alloc_preg (cfg);
5114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5118 dreg = alloc_preg (cfg);
5119 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5120 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5122 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, with the dimension arguments in SP.
 * Marks the cfg as containing varargs and disables LLVM compilation for it
 * (LLVM cannot handle the vararg calling convention used here).
 */
5128 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5130 MonoJitICallInfo *info;
5132 /* Need to register the icall so it gets an icall wrapper */
5133 info = mono_get_array_new_va_icall (rank);
5135 cfg->flags |= MONO_CFG_HAS_VARARGS;
5137 /* mono_array_new_va () needs a vararg calling convention */
5138 cfg->exception_message = g_strdup ("array-new");
5139 cfg->disable_llvm = TRUE;
5141 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5142 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5146 * handle_constrained_gsharedvt_call:
5148 * Handle constrained calls where the receiver is a gsharedvt type.
5149 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * NOTE(review): this extraction is missing lines (non-contiguous embedded
 * line numbers); some argument setup (args [0], args [4]) and the widening
 * logic are not visible here.
 */
5152 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5153 gboolean *ref_emit_widen)
5155 MonoInst *ins = NULL;
5156 gboolean emit_widen = *ref_emit_widen;
5159 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5160 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5161 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a restricted set of signatures is supported by the icall path;
 * everything else falls through to GSHAREDVT_FAILURE below. */
5163 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5164 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5165 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5166 MonoInst *args [16];
5169 * This case handles calls to
5170 * - object:ToString()/Equals()/GetHashCode(),
5171 * - System.IComparable<T>:CompareTo()
5172 * - System.IEquatable<T>:Equals ()
5173 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, via rgctx when it has an open context. */
5177 if (mono_method_check_context_used (cmethod))
5178 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5180 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5181 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5183 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5184 if (fsig->hasthis && fsig->param_count) {
5185 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5186 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5187 ins->dreg = alloc_preg (cfg);
5188 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5189 MONO_ADD_INS (cfg->cbb, ins);
5192 if (mini_is_gsharedvt_type (fsig->params [0])) {
5193 int addr_reg, deref_arg_reg;
5195 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5196 deref_arg_reg = alloc_preg (cfg);
5197 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5198 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5200 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5201 addr_reg = ins->dreg;
5202 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5204 EMIT_NEW_ICONST (cfg, args [3], 0);
5205 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5208 EMIT_NEW_ICONST (cfg, args [3], 0);
5209 EMIT_NEW_ICONST (cfg, args [4], 0);
5211 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap it per return type. */
5214 if (mini_is_gsharedvt_type (fsig->ret)) {
5215 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5216 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Load the value out of the box: skip the MonoObject header. */
5220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5221 MONO_ADD_INS (cfg->cbb, add);
5223 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5224 MONO_ADD_INS (cfg->cbb, ins);
5225 /* ins represents the call result */
5228 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5231 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize the GOT variable, plus a dummy use in the exit block to keep
 * the variable alive for the whole method.  No-op when there is no GOT
 * variable or it was already allocated.
 */
5240 mono_emit_load_got_addr (MonoCompile *cfg)
5242 MonoInst *getaddr, *dummy_use;
5244 if (!cfg->got_var || cfg->got_var_allocated)
5247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5248 getaddr->cil_code = cfg->header->code;
5249 getaddr->dreg = cfg->got_var->dreg;
5251 /* Add it to the start of the first bblock */
5252 if (cfg->bb_entry->code) {
5253 getaddr->next = cfg->bb_entry->code;
5254 cfg->bb_entry->code = getaddr;
5257 MONO_ADD_INS (cfg->bb_entry, getaddr);
5259 cfg->got_var_allocated = TRUE;
5262 * Add a dummy use to keep the got_var alive, since real uses might
5263 * only be generated by the back ends.
5264 * Add it to end_bblock, so the variable's lifetime covers the whole
5266 * It would be better to make the usage of the got var explicit in all
5267 * cases when the backend needs it (i.e. calls, throw etc.), so this
5268 * wouldn't be needed.
5270 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5271 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via the MONO_INLINELIMIT
 * environment variable (see mono_method_check_inlining below). */
5274 static int inline_limit;
5275 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Checks inlining depth, IL header properties, NoInlining/Synchronized
 * flags, marshal-by-ref classes, the size limit (MONO_INLINELIMIT env var or
 * INLINE_LENGTH_LIMIT), cctor/class-initialization constraints, soft-float
 * R4 restrictions, and the cfg's explicit dont_inline list.
 *
 * NOTE(review): this extraction is missing lines (non-contiguous embedded
 * line numbers); the `return FALSE/TRUE` statements and some braces are
 * not visible here.
 */
5278 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5280 MonoMethodHeaderSummary header;
5282 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5283 MonoMethodSignature *sig = mono_method_signature (method);
5287 if (cfg->disable_inline)
5292 if (cfg->inline_depth > 10)
5295 if (!mono_method_get_header_summary (method, &header))
5298 /*runtime, icall and pinvoke are checked by summary call*/
5299 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5300 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5301 (mono_class_is_marshalbyref (method->klass)) ||
5305 /* also consider num_locals? */
5306 /* Do the size check early to avoid creating vtables */
5307 if (!inline_limit_inited) {
5308 if (g_getenv ("MONO_INLINELIMIT"))
5309 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5311 inline_limit = INLINE_LENGTH_LIMIT;
5312 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5314 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5318 * if we can initialize the class of the method right away, we do,
5319 * otherwise we don't allow inlining if the class needs initialization,
5320 * since it would mean inserting a call to mono_runtime_class_init()
5321 * inside the inlined code
5323 if (!(cfg->opt & MONO_OPT_SHARED)) {
5324 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5325 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5326 vtable = mono_class_vtable (cfg->domain, method->klass);
5329 if (!cfg->compile_aot)
5330 mono_runtime_class_init (vtable);
5331 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5332 if (cfg->run_cctors && method->klass->has_cctor) {
5333 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5334 if (!method->klass->runtime_info)
5335 /* No vtable created yet */
5337 vtable = mono_class_vtable (cfg->domain, method->klass);
5340 /* This makes so that inline cannot trigger */
5341 /* .cctors: too many apps depend on them */
5342 /* running with a specific order... */
5343 if (! vtable->initialized)
5345 mono_runtime_class_init (vtable);
5347 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5348 if (!method->klass->runtime_info)
5349 /* No vtable created yet */
5351 vtable = mono_class_vtable (cfg->domain, method->klass);
5354 if (!vtable->initialized)
5359 * If we're compiling for shared code
5360 * the cctor will need to be run at aot method load time, for example,
5361 * or at the end of the compilation of the inlining method.
5363 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5367 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: don't inline methods with R4 returns or parameters. */
5368 if (mono_arch_is_soft_float ()) {
5370 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5372 for (i = 0; i < sig->param_count; ++i)
5373 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5378 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access to KLASS from METHOD requires a
 * class-initialization (cctor) check to be emitted.  Already-initialized
 * vtables (JIT only, not AOT) and accesses from within the class's own
 * non-static methods do not need one.
 *
 * NOTE(review): extraction is missing lines; returns/braces not all visible.
 */
5385 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5387 if (!cfg->compile_aot) {
5389 if (vtable->initialized)
5393 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5394 if (cfg->method == method)
5398 if (!mono_class_needs_cctor_run (klass, method))
5401 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5402 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  BCHECK requests a bounds check.
 * Uses an x86/amd64 LEA fast path for power-of-two element sizes, and an
 * rgctx lookup of the element size for gsharedvt variable-size classes.
 *
 * NOTE(review): extraction is missing lines; the gsharedvt early branch at
 * line 5416 and some else-branches are not visible in full.
 */
5409 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5413 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5416 if (mini_is_gsharedvt_variable_klass (klass)) {
5419 mono_class_init (klass);
5420 size = mono_class_array_element_size (klass);
5423 mult_reg = alloc_preg (cfg);
5424 array_reg = arr->dreg;
5425 index_reg = index->dreg;
5427 #if SIZEOF_REGISTER == 8
5428 /* The array reg is 64 bits but the index reg is only 32 */
5429 if (COMPILE_LLVM (cfg)) {
5431 index2_reg = index_reg;
5433 index2_reg = alloc_preg (cfg);
5434 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5437 if (index->type == STACK_I8) {
5438 index2_reg = alloc_preg (cfg);
5439 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5441 index2_reg = index_reg;
5446 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5448 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and vector offset into a single LEA. */
5449 if (size == 1 || size == 2 || size == 4 || size == 8) {
5450 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5452 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5453 ins->klass = mono_class_get_element_class (klass);
5454 ins->type = STACK_MP;
/* Generic path: addr = arr + index * element_size + offsetof(vector). */
5460 add_reg = alloc_ireg_mp (cfg);
5463 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime — fetch it from
 * the runtime generic context. */
5466 g_assert (cfg->gshared);
5467 context_used = mini_class_check_context_used (cfg, klass);
5468 g_assert (context_used);
5469 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5470 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5474 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5475 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5476 ins->klass = mono_class_get_element_class (klass);
5477 ins->type = STACK_MP;
5478 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i, j] of a rank-2 array ARR
 * with element class KLASS.  Loads the per-dimension lower bound and length
 * from the MonoArrayBounds records, range-checks both indexes, then computes
 *   addr = arr + ((realidx1 * len2 + realidx2) * size) + offsetof(vector).
 *
 * NOTE(review): extraction is missing lines; some index-reassignment lines
 * in the 64-bit sign-extension section are not visible here.
 */
5484 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5486 int bounds_reg = alloc_preg (cfg);
5487 int add_reg = alloc_ireg_mp (cfg);
5488 int mult_reg = alloc_preg (cfg);
5489 int mult2_reg = alloc_preg (cfg);
5490 int low1_reg = alloc_preg (cfg);
5491 int low2_reg = alloc_preg (cfg);
5492 int high1_reg = alloc_preg (cfg);
5493 int high2_reg = alloc_preg (cfg);
5494 int realidx1_reg = alloc_preg (cfg);
5495 int realidx2_reg = alloc_preg (cfg);
5496 int sum_reg = alloc_preg (cfg);
5497 int index1, index2, tmpreg;
5501 mono_class_init (klass);
5502 size = mono_class_array_element_size (klass);
5504 index1 = index_ins1->dreg;
5505 index2 = index_ins2->dreg;
5507 #if SIZEOF_REGISTER == 8
5508 /* The array reg is 64 bits but the index reg is only 32 */
5509 if (COMPILE_LLVM (cfg)) {
5512 tmpreg = alloc_preg (cfg);
5513 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5515 tmpreg = alloc_preg (cfg);
5516 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5520 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5524 /* range checking */
5525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5526 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; require realidx1 < length. */
5528 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5529 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5530 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5531 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5532 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5533 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5534 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds record. */
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5537 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5540 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5542 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5544 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5545 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5547 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5548 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5550 ins->type = STACK_MP;
5552 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of the array element accessed by the
 * Get/Set/Address array accessor method CMETHOD, with the array reference and
 * the index values in SP. IS_SET is TRUE for setters, whose signature carries
 * one trailing value argument that is not an index.
 */
5558 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5562 MonoMethod *addr_method;
5564 MonoClass *eclass = cmethod->klass->element_class;
/* The rank equals the number of index parameters; drop the value arg for setters. */
5566 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank 1: simple one-dimensional address computation, with a bounds check. */
5569 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5571 /* emit_ldelema_2 depends on OP_LMUL */
5572 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5573 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* NOTE(review): gsharedvt variable-size element classes appear to take a separate
 * path here; the lines following this check are elided in this view -- confirm
 * against the full source. */
5576 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the marshal-generated Address () helper for this rank and
 * element size. */
5579 element_size = mono_class_array_element_size (eclass);
5580 addr_method = mono_marshal_get_array_address (rank, element_size);
5581 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default MonoBreakPolicyFunc: honor breakpoints in every method.
 */
5586 static MonoBreakPolicy
5587 always_insert_breakpoint (MonoMethod *method)
5589 return MONO_BREAK_POLICY_ALWAYS;
/* Policy callback consulted before emitting a breakpoint; embedders can replace
 * it via mono_set_break_policy (). Defaults to the always-break policy. */
5592 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5595 * mono_set_break_policy:
5596 * policy_callback: the new callback function
5598 * Allow embedders to decide whether to actually obey breakpoint instructions
5599 * (both break IL instructions and Debugger.Break () method calls), for example
5600 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5601 * untrusted or semi-trusted code.
5603 * @policy_callback will be called every time a break point instruction needs to
5604 * be inserted with the method argument being the method that calls Debugger.Break()
5605 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5606 * if it wants the breakpoint to not be effective in the given method.
5607 * #MONO_BREAK_POLICY_ALWAYS is the default.
5610 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5612 if (policy_callback)
5613 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy. */
5615 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic -- the misspelled name is part of the internal
 * API and is referenced elsewhere; do not rename.)
 *
 *   Return whether a breakpoint should actually be emitted for METHOD,
 * according to the policy installed via mono_set_break_policy ().
 */
5619 should_insert_brekpoint (MonoMethod *method) {
5620 switch (break_policy_func (method)) {
5621 case MONO_BREAK_POLICY_ALWAYS:
5623 case MONO_BREAK_POLICY_NEVER:
5625 case MONO_BREAK_POLICY_ON_DBG:
/* mdb support was removed; warn rather than honor this policy value. */
5626 g_warning ("mdb no longer supported");
/* Unknown return value from the embedder's callback: warn and fall through
 * to the default behavior. */
5629 g_warning ("Incorrect value returned from break policy callback");
5634 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] is the array, args [1] the index, args [2] a pointer to the value
 * location. IS_SET selects between the Set (copy *args [2] into the element)
 * and Get (copy the element into *args [2]) variants.
 */
5636 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5638 MonoInst *addr, *store, *load;
5639 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5641 /* the bounds check is already done by the callers */
5642 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set path: load the value from *args [2], store it into the element, and emit
 * a GC write barrier when the element type is a reference type. */
5644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5645 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5646 if (mini_type_is_reference (fsig->params [2]))
5647 emit_write_barrier (cfg, addr, load);
/* Get path: load the element and store it into *args [2]. */
5649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5650 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type (delegates to mini_type_is_reference
 * on its byval type). */
5657 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5659 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into element sp [1] of array sp [0], whose
 * element class is KLASS. When SAFETY_CHECKS is set, reference-type stores go
 * through the virtual stelemref helper, which performs the array covariance
 * type check; storing a constant null skips that path.
 */
5663 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5665 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5666 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
/* Checked reference store: call the marshal-generated virtual stelemref helper. */
5667 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5668 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5669 MonoInst *iargs [3];
5672 mono_class_setup_vtable (obj_array);
5673 g_assert (helper->slot);
5675 if (sp [0]->type != STACK_OBJ)
5677 if (sp [2]->type != STACK_OBJ)
5684 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt variable-size element: compute the address and do a value store. */
5688 if (mini_is_gsharedvt_variable_klass (klass)) {
5691 // FIXME-VT: OP_ICONST optimization
5692 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5693 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5694 ins->opcode = OP_STOREV_MEMBASE;
5695 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store instruction. */
5696 int array_reg = sp [0]->dreg;
5697 int index_reg = sp [1]->dreg;
5698 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64 bit: the array reg is 64 bits but the index reg is only 32. */
5700 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5701 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5704 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5705 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, and emit a write barrier
 * for reference-type elements. */
5707 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5708 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5709 if (generic_class_is_reference_type (cfg, klass))
5710 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic implementation of Array.UnsafeStore/UnsafeLoad: access an array
 * element without bounds or type-safety checks. For stores, the element class
 * comes from the value parameter (fsig->params [2]); for loads, from the
 * return type.
 */
5717 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5722 eklass = mono_class_from_mono_type (fsig->params [2]);
5724 eklass = mono_class_from_mono_type (fsig->ret);
/* Store: reuse emit_array_store with safety checks disabled. */
5727 return emit_array_store (cfg, eklass, args, FALSE);
/* Load: compute the element address (no bounds check) and load from it. */
5729 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic can be implemented as a
 * plain register move from PARAM_KLASS to RETURN_KLASS. Rejects mixing
 * reference and value types, types containing GC references, struct/scalar
 * mixes, floating point, and size mismatches that cannot be bridged by
 * same-register-class moves. Diagnostic printfs fire at verbose_level > 3.
 *
 * NOTE(review): fixed mojibake in this block -- "&param_klass" had been
 * corrupted to "¶m_klass" ("&para;" rendered as the pilcrow sign) on four
 * lines; restored the address-of expressions.
 */
5736 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5739 int param_size, return_size;
/* Normalize enums etc. to their underlying types before comparing. */
5741 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5742 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5744 if (cfg->verbose_level > 3)
5745 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5747 //Don't allow mixing reference types with value types
5748 if (param_klass->valuetype != return_klass->valuetype) {
5749 if (cfg->verbose_level > 3)
5750 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5754 if (!param_klass->valuetype) {
5755 if (cfg->verbose_level > 3)
5756 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types holding GC references cannot be blindly reinterpreted. */
5761 if (param_klass->has_references || return_klass->has_references)
5764 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5765 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5766 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5767 if (cfg->verbose_level > 3)
5768 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floating point lives in a different register class; a plain move won't do. */
5772 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5773 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5774 if (cfg->verbose_level > 3)
5775 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5779 param_size = mono_class_value_size (param_klass, &align);
5780 return_size = mono_class_value_size (return_klass, &align);
5782 //We can do it if sizes match
5783 if (param_size == return_size) {
5784 if (cfg->verbose_level > 3)
5785 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5789 //No simple way to handle struct if sizes don't match
5790 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5791 if (cfg->verbose_level > 3)
5792 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5797 * Same reg size category.
5798 * A quick note on why we don't require widening here.
5799 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5801 * Since the source value comes from a function argument, the JIT will already have
5802 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5804 if (param_size <= 4 && return_size <= 4) {
5805 if (cfg->verbose_level > 3)
5806 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic implementation of Array.UnsafeMov<S,R>: reinterpret the argument
 * as the return type when the two types (or, for rank-1 arrays, their element
 * classes) are move-compatible. Bails out for gsharedvt variable return types.
 */
5814 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5816 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5817 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5819 if (mini_is_gsharedvt_variable_type (fsig->ret))
5822 //Valuetypes that are semantically equivalent or numbers than can be widened to
5823 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5826 //Arrays of valuetypes that are semantically equivalent
5827 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace the constructor call CMETHOD with inline IR. Attempts SIMD
 * intrinsics first (when MONO_OPT_SIMD is enabled and the arch supports them),
 * then falls back to native-types intrinsics.
 */
5834 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5836 #ifdef MONO_ARCH_SIMD_INTRINSICS
5837 MonoInst *ins = NULL;
5839 if (cfg->opt & MONO_OPT_SIMD) {
5840 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5846 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (one of the
 * MONO_MEMORY_BARRIER_* values) to the current basic block.
 */
5850 emit_memory_barrier (MonoCompile *cfg, int kind)
5852 MonoInst *ins = NULL;
5853 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5854 MONO_ADD_INS (cfg->cbb, ins);
5855 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only for the LLVM backend. Handles System.Math
 * Sin/Cos/Sqrt/Abs(double) as single-sreg float opcodes, and (when
 * MONO_OPT_CMOV is on) integer Min/Max over I4/U4/I8/U8 as two-sreg opcodes.
 * Returns the emitted instruction or NULL when nothing matched.
 */
5861 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5863 MonoInst *ins = NULL;
5866 /* The LLVM backend supports these intrinsics */
5867 if (cmethod->klass == mono_defaults.math_class) {
5868 if (strcmp (cmethod->name, "Sin") == 0) {
5870 } else if (strcmp (cmethod->name, "Cos") == 0) {
5872 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5874 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: result in a fresh float register. */
5878 if (opcode && fsig->param_count == 1) {
5879 MONO_INST_NEW (cfg, ins, opcode);
5880 ins->type = STACK_R8;
5881 ins->dreg = mono_alloc_freg (cfg);
5882 ins->sreg1 = args [0]->dreg;
5883 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max map to conditional-move opcodes, so require MONO_OPT_CMOV. */
5887 if (cfg->opt & MONO_OPT_CMOV) {
5888 if (strcmp (cmethod->name, "Min") == 0) {
5889 if (fsig->params [0]->type == MONO_TYPE_I4)
5891 if (fsig->params [0]->type == MONO_TYPE_U4)
5892 opcode = OP_IMIN_UN;
5893 else if (fsig->params [0]->type == MONO_TYPE_I8)
5895 else if (fsig->params [0]->type == MONO_TYPE_U8)
5896 opcode = OP_LMIN_UN;
5897 } else if (strcmp (cmethod->name, "Max") == 0) {
5898 if (fsig->params [0]->type == MONO_TYPE_I4)
5900 if (fsig->params [0]->type == MONO_TYPE_U4)
5901 opcode = OP_IMAX_UN;
5902 else if (fsig->params [0]->type == MONO_TYPE_I8)
5904 else if (fsig->params [0]->type == MONO_TYPE_U8)
5905 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: stack type follows the operand width. */
5909 if (opcode && fsig->param_count == 2) {
5910 MONO_INST_NEW (cfg, ins, opcode);
5911 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5912 ins->dreg = mono_alloc_ireg (cfg);
5913 ins->sreg1 = args [0]->dreg;
5914 ins->sreg2 = args [1]->dreg;
5915 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in shared (generic-sharing) code:
 * currently the System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5923 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5925 if (cmethod->klass == mono_defaults.array_class) {
5926 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5927 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5928 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5929 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5930 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5931 return emit_array_unsafe_mov (cfg, fsig, args);
5938 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5940 MonoInst *ins = NULL;
5942 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5944 if (cmethod->klass == mono_defaults.string_class) {
5945 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5946 int dreg = alloc_ireg (cfg);
5947 int index_reg = alloc_preg (cfg);
5948 int add_reg = alloc_preg (cfg);
5950 #if SIZEOF_REGISTER == 8
5951 if (COMPILE_LLVM (cfg)) {
5952 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5954 /* The array reg is 64 bits but the index reg is only 32 */
5955 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5958 index_reg = args [1]->dreg;
5960 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5962 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5963 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5964 add_reg = ins->dreg;
5965 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5968 int mult_reg = alloc_preg (cfg);
5969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5970 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5971 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5972 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5974 type_from_op (cfg, ins, NULL, NULL);
5976 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5977 int dreg = alloc_ireg (cfg);
5978 /* Decompose later to allow more optimizations */
5979 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5980 ins->type = STACK_I4;
5981 ins->flags |= MONO_INST_FAULT;
5982 cfg->cbb->has_array_access = TRUE;
5983 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5988 } else if (cmethod->klass == mono_defaults.object_class) {
5989 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5990 int dreg = alloc_ireg_ref (cfg);
5991 int vt_reg = alloc_preg (cfg);
5992 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5993 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5994 type_from_op (cfg, ins, NULL, NULL);
5997 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5998 int dreg = alloc_ireg (cfg);
5999 int t1 = alloc_ireg (cfg);
6001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6002 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6003 ins->type = STACK_I4;
6006 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6007 MONO_INST_NEW (cfg, ins, OP_NOP);
6008 MONO_ADD_INS (cfg->cbb, ins);
6012 } else if (cmethod->klass == mono_defaults.array_class) {
6013 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6014 return emit_array_generic_access (cfg, fsig, args, FALSE);
6015 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6016 return emit_array_generic_access (cfg, fsig, args, TRUE);
6018 #ifndef MONO_BIG_ARRAYS
6020 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6023 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6024 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6025 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6026 int dreg = alloc_ireg (cfg);
6027 int bounds_reg = alloc_ireg_mp (cfg);
6028 MonoBasicBlock *end_bb, *szarray_bb;
6029 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6031 NEW_BBLOCK (cfg, end_bb);
6032 NEW_BBLOCK (cfg, szarray_bb);
6034 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6035 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6036 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6037 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6038 /* Non-szarray case */
6040 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6041 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6043 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6044 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6046 MONO_START_BB (cfg, szarray_bb);
6049 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6050 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6052 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6053 MONO_START_BB (cfg, end_bb);
6055 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6056 ins->type = STACK_I4;
6062 if (cmethod->name [0] != 'g')
6065 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6066 int dreg = alloc_ireg (cfg);
6067 int vtable_reg = alloc_preg (cfg);
6068 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6069 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6070 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6071 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6072 type_from_op (cfg, ins, NULL, NULL);
6075 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6076 int dreg = alloc_ireg (cfg);
6078 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6079 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6080 type_from_op (cfg, ins, NULL, NULL);
6085 } else if (cmethod->klass == runtime_helpers_class) {
6086 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6087 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6091 } else if (cmethod->klass == mono_defaults.monitor_class) {
6092 gboolean is_enter = FALSE;
6093 gboolean is_v4 = FALSE;
6095 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6099 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6104 * To make async stack traces work, icalls which can block should have a wrapper.
6105 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6107 MonoBasicBlock *end_bb;
6109 NEW_BBLOCK (cfg, end_bb);
6111 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6114 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6115 MONO_START_BB (cfg, end_bb);
6118 } else if (cmethod->klass == mono_defaults.thread_class) {
6119 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6120 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6121 MONO_ADD_INS (cfg->cbb, ins);
6123 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6124 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6125 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6127 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6129 if (fsig->params [0]->type == MONO_TYPE_I1)
6130 opcode = OP_LOADI1_MEMBASE;
6131 else if (fsig->params [0]->type == MONO_TYPE_U1)
6132 opcode = OP_LOADU1_MEMBASE;
6133 else if (fsig->params [0]->type == MONO_TYPE_I2)
6134 opcode = OP_LOADI2_MEMBASE;
6135 else if (fsig->params [0]->type == MONO_TYPE_U2)
6136 opcode = OP_LOADU2_MEMBASE;
6137 else if (fsig->params [0]->type == MONO_TYPE_I4)
6138 opcode = OP_LOADI4_MEMBASE;
6139 else if (fsig->params [0]->type == MONO_TYPE_U4)
6140 opcode = OP_LOADU4_MEMBASE;
6141 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6142 opcode = OP_LOADI8_MEMBASE;
6143 else if (fsig->params [0]->type == MONO_TYPE_R4)
6144 opcode = OP_LOADR4_MEMBASE;
6145 else if (fsig->params [0]->type == MONO_TYPE_R8)
6146 opcode = OP_LOADR8_MEMBASE;
6147 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6148 opcode = OP_LOAD_MEMBASE;
6151 MONO_INST_NEW (cfg, ins, opcode);
6152 ins->inst_basereg = args [0]->dreg;
6153 ins->inst_offset = 0;
6154 MONO_ADD_INS (cfg->cbb, ins);
6156 switch (fsig->params [0]->type) {
6163 ins->dreg = mono_alloc_ireg (cfg);
6164 ins->type = STACK_I4;
6168 ins->dreg = mono_alloc_lreg (cfg);
6169 ins->type = STACK_I8;
6173 ins->dreg = mono_alloc_ireg (cfg);
6174 #if SIZEOF_REGISTER == 8
6175 ins->type = STACK_I8;
6177 ins->type = STACK_I4;
6182 ins->dreg = mono_alloc_freg (cfg);
6183 ins->type = STACK_R8;
6186 g_assert (mini_type_is_reference (fsig->params [0]));
6187 ins->dreg = mono_alloc_ireg_ref (cfg);
6188 ins->type = STACK_OBJ;
6192 if (opcode == OP_LOADI8_MEMBASE)
6193 ins = mono_decompose_opcode (cfg, ins);
6195 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6199 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6201 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6203 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6204 opcode = OP_STOREI1_MEMBASE_REG;
6205 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6206 opcode = OP_STOREI2_MEMBASE_REG;
6207 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6208 opcode = OP_STOREI4_MEMBASE_REG;
6209 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6210 opcode = OP_STOREI8_MEMBASE_REG;
6211 else if (fsig->params [0]->type == MONO_TYPE_R4)
6212 opcode = OP_STORER4_MEMBASE_REG;
6213 else if (fsig->params [0]->type == MONO_TYPE_R8)
6214 opcode = OP_STORER8_MEMBASE_REG;
6215 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6216 opcode = OP_STORE_MEMBASE_REG;
6219 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6221 MONO_INST_NEW (cfg, ins, opcode);
6222 ins->sreg1 = args [1]->dreg;
6223 ins->inst_destbasereg = args [0]->dreg;
6224 ins->inst_offset = 0;
6225 MONO_ADD_INS (cfg->cbb, ins);
6227 if (opcode == OP_STOREI8_MEMBASE_REG)
6228 ins = mono_decompose_opcode (cfg, ins);
6233 } else if (cmethod->klass->image == mono_defaults.corlib &&
6234 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6235 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6238 #if SIZEOF_REGISTER == 8
6239 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6240 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6241 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6242 ins->dreg = mono_alloc_preg (cfg);
6243 ins->sreg1 = args [0]->dreg;
6244 ins->type = STACK_I8;
6245 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6246 MONO_ADD_INS (cfg->cbb, ins);
6250 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6252 /* 64 bit reads are already atomic */
6253 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6254 load_ins->dreg = mono_alloc_preg (cfg);
6255 load_ins->inst_basereg = args [0]->dreg;
6256 load_ins->inst_offset = 0;
6257 load_ins->type = STACK_I8;
6258 MONO_ADD_INS (cfg->cbb, load_ins);
6260 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6267 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6268 MonoInst *ins_iconst;
6271 if (fsig->params [0]->type == MONO_TYPE_I4) {
6272 opcode = OP_ATOMIC_ADD_I4;
6273 cfg->has_atomic_add_i4 = TRUE;
6275 #if SIZEOF_REGISTER == 8
6276 else if (fsig->params [0]->type == MONO_TYPE_I8)
6277 opcode = OP_ATOMIC_ADD_I8;
6280 if (!mono_arch_opcode_supported (opcode))
6282 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6283 ins_iconst->inst_c0 = 1;
6284 ins_iconst->dreg = mono_alloc_ireg (cfg);
6285 MONO_ADD_INS (cfg->cbb, ins_iconst);
6287 MONO_INST_NEW (cfg, ins, opcode);
6288 ins->dreg = mono_alloc_ireg (cfg);
6289 ins->inst_basereg = args [0]->dreg;
6290 ins->inst_offset = 0;
6291 ins->sreg2 = ins_iconst->dreg;
6292 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6293 MONO_ADD_INS (cfg->cbb, ins);
6295 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6296 MonoInst *ins_iconst;
6299 if (fsig->params [0]->type == MONO_TYPE_I4) {
6300 opcode = OP_ATOMIC_ADD_I4;
6301 cfg->has_atomic_add_i4 = TRUE;
6303 #if SIZEOF_REGISTER == 8
6304 else if (fsig->params [0]->type == MONO_TYPE_I8)
6305 opcode = OP_ATOMIC_ADD_I8;
6308 if (!mono_arch_opcode_supported (opcode))
6310 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6311 ins_iconst->inst_c0 = -1;
6312 ins_iconst->dreg = mono_alloc_ireg (cfg);
6313 MONO_ADD_INS (cfg->cbb, ins_iconst);
6315 MONO_INST_NEW (cfg, ins, opcode);
6316 ins->dreg = mono_alloc_ireg (cfg);
6317 ins->inst_basereg = args [0]->dreg;
6318 ins->inst_offset = 0;
6319 ins->sreg2 = ins_iconst->dreg;
6320 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6321 MONO_ADD_INS (cfg->cbb, ins);
6323 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6326 if (fsig->params [0]->type == MONO_TYPE_I4) {
6327 opcode = OP_ATOMIC_ADD_I4;
6328 cfg->has_atomic_add_i4 = TRUE;
6330 #if SIZEOF_REGISTER == 8
6331 else if (fsig->params [0]->type == MONO_TYPE_I8)
6332 opcode = OP_ATOMIC_ADD_I8;
6335 if (!mono_arch_opcode_supported (opcode))
6337 MONO_INST_NEW (cfg, ins, opcode);
6338 ins->dreg = mono_alloc_ireg (cfg);
6339 ins->inst_basereg = args [0]->dreg;
6340 ins->inst_offset = 0;
6341 ins->sreg2 = args [1]->dreg;
6342 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6343 MONO_ADD_INS (cfg->cbb, ins);
6346 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6347 MonoInst *f2i = NULL, *i2f;
6348 guint32 opcode, f2i_opcode, i2f_opcode;
6349 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6350 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6352 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6353 fsig->params [0]->type == MONO_TYPE_R4) {
6354 opcode = OP_ATOMIC_EXCHANGE_I4;
6355 f2i_opcode = OP_MOVE_F_TO_I4;
6356 i2f_opcode = OP_MOVE_I4_TO_F;
6357 cfg->has_atomic_exchange_i4 = TRUE;
6359 #if SIZEOF_REGISTER == 8
6361 fsig->params [0]->type == MONO_TYPE_I8 ||
6362 fsig->params [0]->type == MONO_TYPE_R8 ||
6363 fsig->params [0]->type == MONO_TYPE_I) {
6364 opcode = OP_ATOMIC_EXCHANGE_I8;
6365 f2i_opcode = OP_MOVE_F_TO_I8;
6366 i2f_opcode = OP_MOVE_I8_TO_F;
6369 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6370 opcode = OP_ATOMIC_EXCHANGE_I4;
6371 cfg->has_atomic_exchange_i4 = TRUE;
6377 if (!mono_arch_opcode_supported (opcode))
6381 /* TODO: Decompose these opcodes instead of bailing here. */
6382 if (COMPILE_SOFT_FLOAT (cfg))
6385 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6386 f2i->dreg = mono_alloc_ireg (cfg);
6387 f2i->sreg1 = args [1]->dreg;
6388 if (f2i_opcode == OP_MOVE_F_TO_I4)
6389 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6390 MONO_ADD_INS (cfg->cbb, f2i);
6393 MONO_INST_NEW (cfg, ins, opcode);
6394 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6395 ins->inst_basereg = args [0]->dreg;
6396 ins->inst_offset = 0;
6397 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6398 MONO_ADD_INS (cfg->cbb, ins);
6400 switch (fsig->params [0]->type) {
6402 ins->type = STACK_I4;
6405 ins->type = STACK_I8;
6408 #if SIZEOF_REGISTER == 8
6409 ins->type = STACK_I8;
6411 ins->type = STACK_I4;
6416 ins->type = STACK_R8;
6419 g_assert (mini_type_is_reference (fsig->params [0]));
6420 ins->type = STACK_OBJ;
6425 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6426 i2f->dreg = mono_alloc_freg (cfg);
6427 i2f->sreg1 = ins->dreg;
6428 i2f->type = STACK_R8;
6429 if (i2f_opcode == OP_MOVE_I4_TO_F)
6430 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6431 MONO_ADD_INS (cfg->cbb, i2f);
6436 if (cfg->gen_write_barriers && is_ref)
6437 emit_write_barrier (cfg, args [0], args [1]);
6439 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6440 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6441 guint32 opcode, f2i_opcode, i2f_opcode;
6442 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6443 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6445 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6446 fsig->params [1]->type == MONO_TYPE_R4) {
6447 opcode = OP_ATOMIC_CAS_I4;
6448 f2i_opcode = OP_MOVE_F_TO_I4;
6449 i2f_opcode = OP_MOVE_I4_TO_F;
6450 cfg->has_atomic_cas_i4 = TRUE;
6452 #if SIZEOF_REGISTER == 8
6454 fsig->params [1]->type == MONO_TYPE_I8 ||
6455 fsig->params [1]->type == MONO_TYPE_R8 ||
6456 fsig->params [1]->type == MONO_TYPE_I) {
6457 opcode = OP_ATOMIC_CAS_I8;
6458 f2i_opcode = OP_MOVE_F_TO_I8;
6459 i2f_opcode = OP_MOVE_I8_TO_F;
6462 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6463 opcode = OP_ATOMIC_CAS_I4;
6464 cfg->has_atomic_cas_i4 = TRUE;
6470 if (!mono_arch_opcode_supported (opcode))
6474 /* TODO: Decompose these opcodes instead of bailing here. */
6475 if (COMPILE_SOFT_FLOAT (cfg))
6478 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6479 f2i_new->dreg = mono_alloc_ireg (cfg);
6480 f2i_new->sreg1 = args [1]->dreg;
6481 if (f2i_opcode == OP_MOVE_F_TO_I4)
6482 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6483 MONO_ADD_INS (cfg->cbb, f2i_new);
6485 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6486 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6487 f2i_cmp->sreg1 = args [2]->dreg;
6488 if (f2i_opcode == OP_MOVE_F_TO_I4)
6489 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6490 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6493 MONO_INST_NEW (cfg, ins, opcode);
6494 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6495 ins->sreg1 = args [0]->dreg;
6496 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6497 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6498 MONO_ADD_INS (cfg->cbb, ins);
6500 switch (fsig->params [1]->type) {
6502 ins->type = STACK_I4;
6505 ins->type = STACK_I8;
6508 #if SIZEOF_REGISTER == 8
6509 ins->type = STACK_I8;
6511 ins->type = STACK_I4;
6515 ins->type = cfg->r4_stack_type;
6518 ins->type = STACK_R8;
6521 g_assert (mini_type_is_reference (fsig->params [1]));
6522 ins->type = STACK_OBJ;
6527 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6528 i2f->dreg = mono_alloc_freg (cfg);
6529 i2f->sreg1 = ins->dreg;
6530 i2f->type = STACK_R8;
6531 if (i2f_opcode == OP_MOVE_I4_TO_F)
6532 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6533 MONO_ADD_INS (cfg->cbb, i2f);
6538 if (cfg->gen_write_barriers && is_ref)
6539 emit_write_barrier (cfg, args [0], args [1]);
6541 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6542 fsig->params [1]->type == MONO_TYPE_I4) {
6543 MonoInst *cmp, *ceq;
6545 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6548 /* int32 r = CAS (location, value, comparand); */
6549 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6550 ins->dreg = alloc_ireg (cfg);
6551 ins->sreg1 = args [0]->dreg;
6552 ins->sreg2 = args [1]->dreg;
6553 ins->sreg3 = args [2]->dreg;
6554 ins->type = STACK_I4;
6555 MONO_ADD_INS (cfg->cbb, ins);
6557 /* bool result = r == comparand; */
6558 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6559 cmp->sreg1 = ins->dreg;
6560 cmp->sreg2 = args [2]->dreg;
6561 cmp->type = STACK_I4;
6562 MONO_ADD_INS (cfg->cbb, cmp);
6564 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6565 ceq->dreg = alloc_ireg (cfg);
6566 ceq->type = STACK_I4;
6567 MONO_ADD_INS (cfg->cbb, ceq);
6569 /* *success = result; */
6570 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6572 cfg->has_atomic_cas_i4 = TRUE;
6574 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6575 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6579 } else if (cmethod->klass->image == mono_defaults.corlib &&
6580 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6581 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6584 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6586 MonoType *t = fsig->params [0];
6588 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6590 g_assert (t->byref);
6591 /* t is a byref type, so the reference check is more complicated */
6592 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6593 if (t->type == MONO_TYPE_I1)
6594 opcode = OP_ATOMIC_LOAD_I1;
6595 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6596 opcode = OP_ATOMIC_LOAD_U1;
6597 else if (t->type == MONO_TYPE_I2)
6598 opcode = OP_ATOMIC_LOAD_I2;
6599 else if (t->type == MONO_TYPE_U2)
6600 opcode = OP_ATOMIC_LOAD_U2;
6601 else if (t->type == MONO_TYPE_I4)
6602 opcode = OP_ATOMIC_LOAD_I4;
6603 else if (t->type == MONO_TYPE_U4)
6604 opcode = OP_ATOMIC_LOAD_U4;
6605 else if (t->type == MONO_TYPE_R4)
6606 opcode = OP_ATOMIC_LOAD_R4;
6607 else if (t->type == MONO_TYPE_R8)
6608 opcode = OP_ATOMIC_LOAD_R8;
6609 #if SIZEOF_REGISTER == 8
6610 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6611 opcode = OP_ATOMIC_LOAD_I8;
6612 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6613 opcode = OP_ATOMIC_LOAD_U8;
6615 else if (t->type == MONO_TYPE_I)
6616 opcode = OP_ATOMIC_LOAD_I4;
6617 else if (is_ref || t->type == MONO_TYPE_U)
6618 opcode = OP_ATOMIC_LOAD_U4;
6622 if (!mono_arch_opcode_supported (opcode))
6625 MONO_INST_NEW (cfg, ins, opcode);
6626 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6627 ins->sreg1 = args [0]->dreg;
6628 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6629 MONO_ADD_INS (cfg->cbb, ins);
6632 case MONO_TYPE_BOOLEAN:
6639 ins->type = STACK_I4;
6643 ins->type = STACK_I8;
6647 #if SIZEOF_REGISTER == 8
6648 ins->type = STACK_I8;
6650 ins->type = STACK_I4;
6654 ins->type = cfg->r4_stack_type;
6657 ins->type = STACK_R8;
6661 ins->type = STACK_OBJ;
6667 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6669 MonoType *t = fsig->params [0];
6672 g_assert (t->byref);
6673 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6674 if (t->type == MONO_TYPE_I1)
6675 opcode = OP_ATOMIC_STORE_I1;
6676 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6677 opcode = OP_ATOMIC_STORE_U1;
6678 else if (t->type == MONO_TYPE_I2)
6679 opcode = OP_ATOMIC_STORE_I2;
6680 else if (t->type == MONO_TYPE_U2)
6681 opcode = OP_ATOMIC_STORE_U2;
6682 else if (t->type == MONO_TYPE_I4)
6683 opcode = OP_ATOMIC_STORE_I4;
6684 else if (t->type == MONO_TYPE_U4)
6685 opcode = OP_ATOMIC_STORE_U4;
6686 else if (t->type == MONO_TYPE_R4)
6687 opcode = OP_ATOMIC_STORE_R4;
6688 else if (t->type == MONO_TYPE_R8)
6689 opcode = OP_ATOMIC_STORE_R8;
6690 #if SIZEOF_REGISTER == 8
6691 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6692 opcode = OP_ATOMIC_STORE_I8;
6693 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6694 opcode = OP_ATOMIC_STORE_U8;
6696 else if (t->type == MONO_TYPE_I)
6697 opcode = OP_ATOMIC_STORE_I4;
6698 else if (is_ref || t->type == MONO_TYPE_U)
6699 opcode = OP_ATOMIC_STORE_U4;
6703 if (!mono_arch_opcode_supported (opcode))
6706 MONO_INST_NEW (cfg, ins, opcode);
6707 ins->dreg = args [0]->dreg;
6708 ins->sreg1 = args [1]->dreg;
6709 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6710 MONO_ADD_INS (cfg->cbb, ins);
6712 if (cfg->gen_write_barriers && is_ref)
6713 emit_write_barrier (cfg, args [0], args [1]);
6719 } else if (cmethod->klass->image == mono_defaults.corlib &&
6720 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6721 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6722 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6723 if (should_insert_brekpoint (cfg->method)) {
6724 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6726 MONO_INST_NEW (cfg, ins, OP_NOP);
6727 MONO_ADD_INS (cfg->cbb, ins);
6731 } else if (cmethod->klass->image == mono_defaults.corlib &&
6732 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6733 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6734 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6736 EMIT_NEW_ICONST (cfg, ins, 1);
6738 EMIT_NEW_ICONST (cfg, ins, 0);
6741 } else if (cmethod->klass->image == mono_defaults.corlib &&
6742 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6743 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6744 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6745 /* No stack walks are currently available, so implement this as an intrinsic */
6746 MonoInst *assembly_ins;
6748 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6749 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6752 } else if (cmethod->klass->image == mono_defaults.corlib &&
6753 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6754 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6755 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6756 /* No stack walks are currently available, so implement this as an intrinsic */
6757 MonoInst *method_ins;
6758 MonoMethod *declaring = cfg->method;
6760 /* This returns the declaring generic method */
6761 if (declaring->is_inflated)
6762 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6763 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6764 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6765 cfg->no_inline = TRUE;
6766 if (cfg->method != cfg->current_method)
6767 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6770 } else if (cmethod->klass == mono_defaults.math_class) {
6772 * There is general branchless code for Min/Max, but it does not work for
6774 * http://everything2.com/?node_id=1051618
6776 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6777 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6778 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6779 !strcmp (cmethod->klass->name, "Selector")) ||
6780 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6781 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6782 !strcmp (cmethod->klass->name, "Selector"))
6784 if (cfg->backend->have_objc_get_selector &&
6785 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6786 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6787 cfg->compile_aot && !cfg->llvm_only) {
6789 MonoJumpInfoToken *ji;
6794 cfg->exception_message = g_strdup ("GetHandle");
6795 cfg->disable_llvm = TRUE;
6797 if (args [0]->opcode == OP_GOT_ENTRY) {
6798 pi = (MonoInst *)args [0]->inst_p1;
6799 g_assert (pi->opcode == OP_PATCH_INFO);
6800 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6801 ji = (MonoJumpInfoToken *)pi->inst_p0;
6803 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6804 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6807 NULLIFY_INS (args [0]);
6810 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6811 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6812 ins->dreg = mono_alloc_ireg (cfg);
6814 ins->inst_p0 = mono_string_to_utf8 (s);
6815 MONO_ADD_INS (cfg->cbb, ins);
6820 #ifdef MONO_ARCH_SIMD_INTRINSICS
6821 if (cfg->opt & MONO_OPT_SIMD) {
6822 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6828 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6832 if (COMPILE_LLVM (cfg)) {
6833 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6838 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6842 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected well-known runtime calls to faster JIT-emitted
 * sequences.  Currently handles only String.InternalAllocateStr, replacing
 * the icall with a direct call to the GC's managed string allocator when
 * allocation profiling and shared (domain-neutral) code are both off.
 * Presumably returns NULL when no redirection applies — the tail of the
 * function is elided from this view.
 */
6845 inline static MonoInst*
6846 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6847 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6849 if (method->klass == mono_defaults.string_class) {
6850 /* managed string allocation support */
6851 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6852 MonoInst *iargs [2];
6853 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6854 MonoMethod *managed_alloc = NULL;
6856 g_assert (vtable); /* Should not fail since it's System.String */
6857 #ifndef MONO_CROSS_COMPILE
6858 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call managed_alloc (vtable, length) in place of the icall.
 * args [0] is the requested string length. */
6862 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6863 iargs [1] = args [0];
6864 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments SP into freshly created local variables and
 * install them as cfg->args [], so argument loads inside an inlined callee
 * read from these copies.
 */
6871 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6873 MonoInst *store, *temp;
6876 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The implicit 'this' has no entry in sig->params, so derive its type
 * from the evaluation-stack entry instead. */
6877 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6880 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6881 * would be different than the MonoInst's used to represent arguments, and
6882 * the ldelema implementation can't deal with that.
6883 * Solution: When ldelema is used on an inline argument, create a var for
6884 * it, emit ldelema on that var, and emit the saving code below in
6885 * inline_method () if needed.
6887 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6888 cfg->args [i] = temp;
6889 /* This uses cfg->args [i] which is set by the preceding line */
6890 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6891 store->cil_code = sp [0]->cil_code;
/* Debugging aids: when these are enabled, inlining can be restricted to
 * methods whose full names match prefixes supplied via the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables. */
6896 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6897 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6899 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD may be inlined: TRUE when its full name
 * starts with the prefix from MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * (the env var is read once and cached in a static).  The behavior for an
 * unset/empty limit is in lines elided from this view — presumably
 * everything is allowed then.
 */
6901 check_inline_called_method_name_limit (MonoMethod *called_method)
6904 static const char *limit = NULL;
6906 if (limit == NULL) {
6907 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6909 if (limit_string != NULL)
6910 limit = limit_string;
6915 if (limit [0] != '\0') {
6916 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: equal for strlen (limit) chars means a match. */
6918 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6919 g_free (called_method_name);
6921 //return (strncmp_result <= 0);
6922 return (strncmp_result == 0);
6929 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the caller:
 * TRUE when CALLER_METHOD's full name starts with the prefix from
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT (read once, cached in a static).
 */
6931 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6934 static const char *limit = NULL;
6936 if (limit == NULL) {
6937 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6938 if (limit_string != NULL) {
6939 limit = limit_string;
6945 if (limit [0] != '\0') {
6946 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit. */
6948 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6949 g_free (caller_method_name);
6951 //return (strncmp_result <= 0);
6952 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes the vreg DREG to the zero value for RTYPE:
 * NULL for pointer/reference types, 0 for integral types, 0.0 for floats
 * (loaded from the static constants below so OP_R4CONST/OP_R8CONST have an
 * address to reference), and VZERO for value types.
 */
6960 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6962 static double r8_0 = 0.0;
6963 static float r4_0 = 0.0;
/* Strip generic-sharing/enum wrappers so 't' below reflects the real
 * underlying type (some declarations here are elided from this view). */
6967 rtype = mini_get_underlying_type (rtype);
6971 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6972 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6973 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6974 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6975 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* When r4fp is set, R4 values are kept in single precision. */
6976 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6977 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6978 ins->type = STACK_R4;
6979 ins->inst_p0 = (void*)&r4_0;
6981 MONO_ADD_INS (cfg->cbb, ins);
6982 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6983 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6984 ins->type = STACK_R8;
6985 ins->inst_p0 = (void*)&r8_0;
6987 MONO_ADD_INS (cfg->cbb, ins);
/* Value types (including generic instances over valuetypes and
 * variable types known to be valuetypes) are zeroed wholesale. */
6988 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6989 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6990 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6991 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6992 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is reference-like: init with NULL. */
6994 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit DUMMY_* initialization opcodes for DREG.  These keep the IR/SSA
 * form valid without generating real initialization code; the type
 * dispatch mirrors emit_init_rvar (), which is used as the fallback.
 */
6999 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7003 rtype = mini_get_underlying_type (rtype);
7007 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7008 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7009 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7010 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7011 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7012 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7014 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7016 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7017 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7018 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7019 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7020 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this case: fall back to real initialization. */
7022 emit_init_rvar (cfg, dreg, rtype);
7026 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE, either for real (INIT)
 * or with dummy opcodes that merely keep the IR well formed.
 */
7028 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7030 MonoInst *var = cfg->locals [local];
/* Soft-float: initialize a scratch reg and store it through a LOCSTORE
 * rather than writing the local's dreg directly. */
7031 if (COMPILE_SOFT_FLOAT (cfg)) {
7033 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7034 emit_init_rvar (cfg, reg, type);
7035 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7038 emit_init_rvar (cfg, var->dreg, type);
7040 emit_dummy_init_rvar (cfg, var->dreg, type);
7047 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that mono_method_to_ir () mutates, compiles CMETHOD's IL into
 * fresh basic blocks between SBBLOCK and EBBLOCK, then restores CFG.
 * On success the new blocks are linked/merged into the caller's CFG and
 * the callee's return value (if any) is loaded from RVAR; on failure the
 * new blocks are discarded.  Returns the positive inlining cost on
 * success — the failure return is elided from this view.
 */
7050 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7051 guchar *ip, guint real_offset, gboolean inline_always)
7053 MonoInst *ins, *rvar = NULL;
7054 MonoMethodHeader *cheader;
7055 MonoBasicBlock *ebblock, *sbblock;
7057 MonoMethod *prev_inlined_method;
7058 MonoInst **prev_locals, **prev_args;
7059 MonoType **prev_arg_types;
7060 guint prev_real_offset;
7061 GHashTable *prev_cbb_hash;
7062 MonoBasicBlock **prev_cil_offset_to_bb;
7063 MonoBasicBlock *prev_cbb;
7064 unsigned char* prev_cil_start;
7065 guint32 prev_cil_offset_to_bb_len;
7066 MonoMethod *prev_current_method;
7067 MonoGenericContext *prev_generic_context;
7068 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7070 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based debugging filters; see the limit helpers above. */
7072 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7073 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7076 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7077 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7082 fsig = mono_method_signature (cmethod);
7084 if (cfg->verbose_level > 2)
7085 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7087 if (!cmethod->inline_info) {
7088 cfg->stat_inlineable_methods++;
7089 cmethod->inline_info = 1;
7092 /* allocate local variables */
7093 cheader = mono_method_get_header (cmethod);
7095 if (cheader == NULL || mono_loader_get_last_error ()) {
7097 mono_metadata_free_mh (cheader);
/* If inlining is mandatory, surface the loader error on the cfg;
 * otherwise just clear it and decline to inline. */
7098 if (inline_always && mono_loader_get_last_error ()) {
7099 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7100 mono_error_set_from_loader_error (&cfg->error);
7103 mono_loader_clear_error ();
7107 /*Must verify before creating locals as it can cause the JIT to assert.*/
7108 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7109 mono_metadata_free_mh (cheader);
7113 /* allocate space to store the return value */
7114 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7115 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
7118 prev_locals = cfg->locals;
7119 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7120 for (i = 0; i < cheader->num_locals; ++i)
7121 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7123 /* allocate start and end blocks */
7124 /* This is needed so if the inline is aborted, we can clean up */
7125 NEW_BBLOCK (cfg, sbblock);
7126 sbblock->real_offset = real_offset;
7128 NEW_BBLOCK (cfg, ebblock);
7129 ebblock->block_num = cfg->num_bblocks++;
7130 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir () will overwrite while
 * compiling the callee. */
7132 prev_args = cfg->args;
7133 prev_arg_types = cfg->arg_types;
7134 prev_inlined_method = cfg->inlined_method;
7135 cfg->inlined_method = cmethod;
7136 cfg->ret_var_set = FALSE;
7137 cfg->inline_depth ++;
7138 prev_real_offset = cfg->real_offset;
7139 prev_cbb_hash = cfg->cbb_hash;
7140 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7141 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7142 prev_cil_start = cfg->cil_start;
7143 prev_cbb = cfg->cbb;
7144 prev_current_method = cfg->current_method;
7145 prev_generic_context = cfg->generic_context;
7146 prev_ret_var_set = cfg->ret_var_set;
7147 prev_disable_inline = cfg->disable_inline;
/* An inlined callvirt on an instance method keeps virtual semantics
 * (e.g. for the null check on 'this'). */
7149 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7152 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7154 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
7156 cfg->inlined_method = prev_inlined_method;
7157 cfg->real_offset = prev_real_offset;
7158 cfg->cbb_hash = prev_cbb_hash;
7159 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7160 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7161 cfg->cil_start = prev_cil_start;
7162 cfg->locals = prev_locals;
7163 cfg->args = prev_args;
7164 cfg->arg_types = prev_arg_types;
7165 cfg->current_method = prev_current_method;
7166 cfg->generic_context = prev_generic_context;
7167 cfg->ret_var_set = prev_ret_var_set;
7168 cfg->disable_inline = prev_disable_inline;
7169 cfg->inline_depth --;
/* Accept the inline if it was cheap enough, forced, or the callee is
 * marked AggressiveInlining.  Note the cost cap (60) is larger than the
 * caller-side INLINE_LENGTH_LIMIT. */
7171 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7172 if (cfg->verbose_level > 2)
7173 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7175 cfg->stat_inlined_methods++;
7177 /* always add some code to avoid block split failures */
7178 MONO_INST_NEW (cfg, ins, OP_NOP);
7179 MONO_ADD_INS (prev_cbb, ins);
7181 prev_cbb->next_bb = sbblock;
7182 link_bblock (cfg, prev_cbb, sbblock);
7185 * Get rid of the begin and end bblocks if possible to aid local
7188 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7190 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7191 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7193 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7194 MonoBasicBlock *prev = ebblock->in_bb [0];
7196 if (prev->next_bb == ebblock) {
7197 mono_merge_basic_blocks (cfg, prev, ebblock);
7199 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7200 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7201 cfg->cbb = prev_cbb;
7204 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7209 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that end in OP_NOT_REACHED so
 * every path into ebblock defines it. */
7215 for (i = 0; i < ebblock->in_count; ++i) {
7216 bb = ebblock->in_bb [i];
7218 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7221 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7231 * If the inlined method contains only a throw, then the ret var is not
7232 * set, so set it to a dummy value.
7235 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7237 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7240 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Failure path: undo the attempt and fall back to a normal call. */
7243 if (cfg->verbose_level > 2)
7244 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7245 cfg->exception_type = MONO_EXCEPTION_NONE;
7246 mono_loader_clear_error ();
7248 /* This gets rid of the newly added bblocks */
7249 cfg->cbb = prev_cbb;
7251 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7256 * Some of these comments may well be out-of-date.
7257 * Design decisions: we do a single pass over the IL code (and we do bblock
7258 * splitting/merging in the few cases when it's required: a back jump to an IL
7259 * address that was not already seen as bblock starting point).
7260 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7261 * Complex operations are decomposed in simpler ones right away. We need to let the
7262 * arch-specific code peek and poke inside this process somehow (except when the
7263 * optimizations can take advantage of the full semantic info of coarse opcodes).
7264 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7265 * MonoInst->opcode initially is the IL opcode or some simplification of that
7266 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7267 * opcode with value bigger than OP_LAST.
7268 * At this point the IR can be handed over to an interpreter, a dumb code generator
7269 * or to the optimizing code generator that will translate it to SSA form.
7271 * Profiling directed optimizations.
7272 * We may compile by default with few or no optimizations and instrument the code
7273 * or the user may indicate what methods to optimize the most either in a config file
7274 * or through repeated runs where the compiler applies offline the optimizations to
7275 * each method and then decides if it was worth it.
/* Verification helpers used inside mono_method_to_ir (): each check jumps
 * to the enclosing function's UNVERIFIED/TYPE_LOAD_ERROR handling on
 * failure, referencing locals (sp, stack_start, header, ip, end, ...) of
 * that function. */
7278 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7279 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7280 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7281 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7282 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7283 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7284 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7285 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7287 /* offset from br.s -> br like opcodes */
7288 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the instruction at IP can be attributed to BB, i.e. IP
 * does not start a different basic block.
 */
7291 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7293 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7295 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL between START and END: decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch, so the second pass can emit
 * into a complete block structure.  Error/return handling is partly
 * elided from this view.
 */
7299 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7301 unsigned char *ip = start;
7302 unsigned char *target;
7305 MonoBasicBlock *bblock;
7306 const MonoOpcode *opcode;
7309 cli_addr = ip - start;
7310 i = mono_opcode_value ((const guint8 **)&ip, end);
7313 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; for branches also mark targets.
 * Several case labels in this switch are elided from this view. */
7314 switch (opcode->argument) {
7315 case MonoInlineNone:
7318 case MonoInlineString:
7319 case MonoInlineType:
7320 case MonoInlineField:
7321 case MonoInlineMethod:
7324 case MonoShortInlineR:
7331 case MonoShortInlineVar:
7332 case MonoShortInlineI:
7335 case MonoShortInlineBrTarget:
/* 2 = 1 opcode byte + 1 byte signed displacement. */
7336 target = start + cli_addr + 2 + (signed char)ip [1];
7337 GET_BBLOCK (cfg, bblock, target);
/* The fall-through successor also starts a block. */
7340 GET_BBLOCK (cfg, bblock, ip);
7342 case MonoInlineBrTarget:
/* 5 = 1 opcode byte + 4 byte signed displacement. */
7343 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7344 GET_BBLOCK (cfg, bblock, target);
7347 GET_BBLOCK (cfg, bblock, ip);
7349 case MonoInlineSwitch: {
7350 guint32 n = read32 (ip + 1);
/* Default (fall-through) target follows the n-entry jump table. */
7353 cli_addr += 5 + 4 * n;
7354 target = start + cli_addr;
7355 GET_BBLOCK (cfg, bblock, target);
7357 for (j = 0; j < n; ++j) {
7358 target = start + cli_addr + (gint32)read32 (ip);
7359 GET_BBLOCK (cfg, bblock, target);
7369 g_assert_not_reached ();
/* Code in the block containing a throw is considered cold. */
7372 if (i == CEE_THROW) {
7373 unsigned char *bb_start = ip - 1;
7375 /* Find the start of the bblock containing the throw */
7377 while ((bb_start >= start) && !bblock) {
7378 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7382 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing
 * open constructed types.  For wrapper methods the target is looked up in
 * the wrapper data and inflated with CONTEXT; otherwise it is loaded from
 * M's image.  Errors are reported through ERROR.
 */
7392 static inline MonoMethod *
7393 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7397 mono_error_init (error);
7399 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7400 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7402 method = mono_class_inflate_generic_method_checked (method, context, error);
7405 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared
 * code, methods on open constructed types are rejected (recorded as a
 * bad-image error on CFG).  With a NULL CFG a local MonoError is used and
 * any failure is swallowed.
 */
7411 static inline MonoMethod *
7412 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7415 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7417 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7418 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7422 if (!method && !cfg)
7423 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in METHOD's scope to a MonoClass, inflating with CONTEXT.
 * For wrappers the class comes from the wrapper data; otherwise from the
 * image's typespec/typedef tables.  The class is initialized before being
 * returned (error handling partly elided from this view).
 */
7428 static inline MonoClass*
7429 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7434 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7435 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7437 klass = mono_class_inflate_generic_class (klass, context);
7439 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7440 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7443 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a standalone method signature in METHOD's scope
 * (wrapper data for wrappers, metadata otherwise) and inflate it with
 * CONTEXT.  Inflation failure is treated as fatal via g_assert.
 */
7447 static inline MonoMethodSignature*
7448 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7450 MonoMethodSignature *fsig;
7452 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7453 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7455 fsig = mono_metadata_parse_signature (method->klass->image, token);
7459 fsig = mono_inflate_generic_signature(fsig, context, &error);
7461 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException(Exception) method,
 * resolving and caching it in a static on first use.
 */
7467 throw_exception (void)
7469 static MonoMethod *method = NULL;
7472 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7473 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () which raises EX at
 * runtime (used for CoreCLR security violations).
 */
7480 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7482 MonoMethod *thrower = throw_exception ();
7485 EMIT_NEW_PCONST (cfg, args [0], ex);
7486 mono_emit_method_call (cfg, thrower, args, NULL);
7490 * Return the original method if a wrapper is specified. We can only access
7491 * the custom attributes from the original method.
7494 get_original_method (MonoMethod *method)
/* Non-wrappers are already "original" (return elided from this view). */
7496 if (method->wrapper_type == MONO_WRAPPER_NONE)
7499 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7500 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7503 /* in other cases we need to find the original method */
7504 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not access FIELD,
 * emit code that throws the returned security exception at runtime.
 */
7508 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7510 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7511 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7513 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not call CALLEE,
 * emit code that throws the returned security exception at runtime.
 */
7517 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7519 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7520 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7522 emit_throw_exception (cfg, ex);
7526 * Check that the IL instructions at ip are the array initialization
7527 * sequence and return the pointer to the data and the size.
/* Pattern-matches the dup/ldtoken/call-InitializeArray idiom that follows
 * a newarr, so the JIT can replace the runtime call with a direct memory
 * copy from the field's RVA data.  Returns the data pointer (or, for AOT,
 * the RVA wrapped in a pointer) — several early-out returns are elided
 * from this view. */
7530 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7533 * newarr[System.Int32]
7535 * ldtoken field valuetype ...
7536 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand is a Field token (0x04xxxxxx). */
7538 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7540 guint32 token = read32 (ip + 7);
7541 guint32 field_token = read32 (ip + 2);
7542 guint32 field_index = field_token & 0xffffff;
7544 const char *data_ptr;
7546 MonoMethod *cmethod;
7547 MonoClass *dummy_class;
7548 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7552 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7556 *out_field_token = field_token;
7558 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real RuntimeHelpers.InitializeArray from corlib qualifies. */
7561 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element-size dispatch; most case labels are elided from this view. */
7563 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7564 case MONO_TYPE_BOOLEAN:
7568 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7569 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7570 case MONO_TYPE_CHAR:
/* Reject when the array payload would exceed the field's data size. */
7587 if (size > mono_type_size (field->type, &dummy_align))
7590 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7591 if (!image_is_dynamic (method->klass->image)) {
7592 field_index = read32 (ip + 2) & 0xffffff;
7593 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7594 data_ptr = mono_image_rva_map (method->klass->image, rva);
7595 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7596 /* for aot code we do the lookup on load */
7597 if (aot && data_ptr)
7598 return (const char *)GUINT_TO_POINTER (rva);
7600 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7602 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP inside METHOD, including a disassembly of the offending instruction
 * when the body is non-empty.
 */
7610 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7612 char *method_fname = mono_method_full_name (method, TRUE);
7614 MonoMethodHeader *header = mono_method_get_header (method);
7616 if (header->code_size == 0)
7617 method_code = g_strdup ("method body is empty.");
7619 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7620 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7621 g_free (method_fname);
7622 g_free (method_code);
/* Header is freed later in one batch with the others. */
7623 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack top *SP into local N.  When the store would
 * be a plain reg-reg move and *SP is a just-emitted constant, retarget the
 * constant's dreg to the local instead of emitting a separate move.
 */
7627 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7630 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7631 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7632 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7633 /* Optimize reg-reg moves away */
7635 * Can't optimize other opcodes, since sp[0] might point to
7636 * the last ins of a decomposed opcode.
7638 sp [0]->dreg = (cfg)->locals [n]->dreg;
7640 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7645 * ldloca inhibits many optimizations so try to get rid of it in common
/* Peephole for ldloca: when the address is only taken to feed an
 * immediately following initobj in the same basic block, initialize the
 * local directly and skip both instructions.  Returns the advanced ip on
 * success — the failure/advance returns are elided from this view. */
7648 static inline unsigned char *
7649 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7659 local = read16 (ip + 2);
7663 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7664 /* From the INITOBJ case */
7665 token = read32 (ip + 2);
7666 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7667 CHECK_TYPELOAD (klass);
7668 type = mini_get_underlying_type (&klass->byval_arg);
7669 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode, where
 * vtable slots hold function descriptors (address + extra argument) rather
 * than raw code pointers or trampolines. Four strategies are used, from
 * fastest to slowest:
 *   1. plain virtual call   — load the vtable slot, lazily init via icall;
 *   2. simple iface call    — call through an IMT-slot thunk descriptor;
 *   3. generic virtual /
 *      variant iface call   — IMT thunk with a slowpath resolver icall;
 *   4. everything else
 *      (gsharedvt etc.)     — resolve fully through a runtime icall.
 * Returns the call instruction produced by the chosen calli helper.
 */
7677 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7679 MonoInst *icall_args [16];
7680 MonoInst *call_target, *ins, *vtable_ins;
7681 int arg_reg, this_reg, vtable_reg;
7682 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7683 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7684 gboolean variant_iface = FALSE;
7689 * In llvm-only mode, vtables contain function descriptors instead of
7690 * method addresses/trampolines.
/* sp [0] is the receiver; an explicit null check is required up front. */
7692 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods are addressed by IMT slot, others by vtable index. */
7695 slot = mono_method_get_imt_slot (cmethod);
7697 slot = mono_method_get_vtable_index (cmethod);
7699 this_reg = sp [0]->dreg;
/* Variant generic interfaces need the slowpath-capable IMT machinery. */
7701 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7702 variant_iface = TRUE;
7704 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7706 * The simplest case, a normal virtual call.
7708 int slot_reg = alloc_preg (cfg);
7709 int addr_reg = alloc_preg (cfg);
7710 int arg_reg = alloc_preg (cfg);
7711 MonoBasicBlock *non_null_bb;
7713 vtable_reg = alloc_preg (cfg);
7714 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7715 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7717 /* Load the vtable slot, which contains a function descriptor. */
7718 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7720 NEW_BBLOCK (cfg, non_null_bb);
/* Slot already initialized (non-null) is the expected, likely case. */
7722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7723 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7727 // FIXME: Make the wrapper use the preserveall cconv
7728 // FIXME: Use one icall per slot for small slot numbers ?
/* Slowpath: initialize the slot at runtime via an icall. */
7729 icall_args [0] = vtable_ins;
7730 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7731 /* Make the icall return the vtable slot value to save some code space */
7732 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7733 ins->dreg = slot_reg;
7734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7737 MONO_START_BB (cfg, non_null_bb);
7738 /* Load the address + arg from the vtable slot */
7739 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7742 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7745 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7747 * A simple interface call
7749 * We make a call through an imt slot to obtain the function descriptor we need to call.
7750 * The imt slot contains a function descriptor for a runtime function + arg.
7752 int slot_reg = alloc_preg (cfg);
7753 int addr_reg = alloc_preg (cfg);
7754 int arg_reg = alloc_preg (cfg);
7755 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7757 vtable_reg = alloc_preg (cfg);
7758 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7759 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7762 * The slot is already initialized when the vtable is created so there is no need
7766 /* Load the imt slot, which contains a function descriptor. */
7767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7769 /* Load the address + arg of the imt thunk from the imt slot */
7770 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7771 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7773 * IMT thunks in llvm-only mode are C functions which take an info argument
7774 * plus the imt method and return the ftndesc to call.
7776 icall_args [0] = thunk_arg_ins;
7777 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7778 cmethod, MONO_RGCTX_INFO_METHOD);
7779 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7781 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7784 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7786 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7787 * dynamically extended as more instantiations are discovered.
7788 * This handles generic virtual methods both on classes and interfaces.
7790 int slot_reg = alloc_preg (cfg);
7791 int addr_reg = alloc_preg (cfg);
7792 int arg_reg = alloc_preg (cfg);
7793 int ftndesc_reg = alloc_preg (cfg);
7794 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7795 MonoBasicBlock *slowpath_bb, *end_bb;
7797 NEW_BBLOCK (cfg, slowpath_bb);
7798 NEW_BBLOCK (cfg, end_bb);
7800 vtable_reg = alloc_preg (cfg);
7801 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface methods use a (negative) IMT slot, class methods a vtable slot. */
7803 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7805 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7807 /* Load the slot, which contains a function descriptor. */
7808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7810 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7811 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7816 /* Same as with iface calls */
7817 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7818 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7819 icall_args [0] = thunk_arg_ins;
7820 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7821 cmethod, MONO_RGCTX_INFO_METHOD);
7822 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7823 ftndesc_ins->dreg = ftndesc_reg;
7825 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7826 * they don't know about yet. Fall back to the slowpath in that case.
7828 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7829 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: resolve the target through the runtime, then rejoin. */
7834 MONO_START_BB (cfg, slowpath_bb);
7835 icall_args [0] = vtable_ins;
7836 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7837 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7838 cmethod, MONO_RGCTX_INFO_METHOD);
7840 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7842 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7843 ftndesc_ins->dreg = ftndesc_reg;
7844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7847 MONO_START_BB (cfg, end_bb);
7848 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7852 * Non-optimized cases
7854 icall_args [0] = sp [0];
7855 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7857 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7858 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-param slot: the resolver icall writes the extra call argument here. */
7860 arg_reg = alloc_preg (cfg);
7861 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7862 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
/* Only the gsharedvt case should reach this fallback path. */
7864 g_assert (is_gsharedvt);
7866 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7868 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7871 * Pass the extra argument even if the callee doesn't receive it, most
7872 * calling conventions allow this.
7874 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Walk KLASS's ancestor chain and report whether it derives from (or is)
 * System.Exception. NOTE(review): the surrounding loop/return lines are
 * elided in this view.
 */
7878 is_exception_class (MonoClass *klass)
7881 if (klass == mono_defaults.exception_class)
/* Move up the inheritance chain and test the parent next. */
7883 klass = klass->parent;
* is_jit_optimizer_disabled:
* Determine whenever M's assembly has a DebuggableAttribute with the
* IsJITOptimizerDisabled flag set.
7895 is_jit_optimizer_disabled (MonoMethod *m)
7897 MonoAssembly *ass = m->klass->image->assembly;
7898 MonoCustomAttrInfo* attrs;
7901 gboolean val = FALSE;
/* Fast path: the answer is cached per-assembly after the first query. */
7904 if (ass->jit_optimizer_disabled_inited)
7905 return ass->jit_optimizer_disabled;
7907 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type not available: cache "not disabled".
 * The memory barrier orders the value store before the inited flag store
 * so concurrent readers of the fast path never see a stale value. */
7911 ass->jit_optimizer_disabled = FALSE;
7912 mono_memory_barrier ();
7913 ass->jit_optimizer_disabled_inited = TRUE;
7917 attrs = mono_custom_attrs_from_assembly (ass);
/* Scan assembly-level attributes for a DebuggableAttribute ctor. */
7919 for (i = 0; i < attrs->num_attrs; ++i) {
7920 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7922 MonoMethodSignature *sig;
7924 if (!attr->ctor || attr->ctor->klass != klass)
7926 /* Decode the attribute. See reflection.c */
7927 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog. */
7928 g_assert (read16 (p) == 0x0001);
7931 // FIXME: Support named parameters
/* Only the (bool, bool) ctor overload is decoded here. */
7932 sig = mono_method_signature (attr->ctor);
7933 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7935 /* Two boolean arguments */
7939 mono_custom_attrs_free (attrs);
/* Publish the computed result, again ordering value before the flag. */
7942 ass->jit_optimizer_disabled = val;
7943 mono_memory_barrier ();
7944 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call to CMETHOD made from METHOD with signature FSIG
 * and IL opcode CALL_OPCODE can be compiled as a real tail call. Starts from
 * the architecture's verdict and then vetoes any case where the callee could
 * observe the caller's (about to be unwound) stack frame or where the runtime
 * needs the caller frame to stay live.
 */
7950 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7952 gboolean supported_tail_call;
/* Architecture-specific check on the two signatures comes first. */
7955 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7957 for (i = 0; i < fsig->param_count; ++i) {
7958 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7959 /* These can point to the current method's stack */
7960 supported_tail_call = FALSE;
7962 if (fsig->hasthis && cmethod->klass->valuetype)
7963 /* this might point to the current method's stack */
7964 supported_tail_call = FALSE;
/* P/Invoke targets, LMF-saving callers and (most) wrappers keep frames. */
7965 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7966 supported_tail_call = FALSE;
7967 if (cfg->method->save_lmf)
7968 supported_tail_call = FALSE;
7969 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7970 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible (not calli/callvirt). */
7971 if (call_opcode != CEE_CALL)
7972 supported_tail_call = FALSE;
7974 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call related bugs. */
7976 if (supported_tail_call) {
7977 if (!mono_debug_count ())
7978 supported_tail_call = FALSE;
7982 return supported_tail_call;
* Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: SP holds the freshly
 * allocated object plus the ctor arguments. Picks between an intrinsic,
 * inlining, a gsharedvt indirect call, a generic-sharing indirect call, or a
 * plain direct call, updating *INLINE_COSTS when inlining succeeds.
 */
7991 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7992 MonoInst **sp, guint8 *ip, int *inline_costs)
7994 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an rgctx/vtable argument. */
7996 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7997 mono_method_is_generic_sharable (cmethod, TRUE)) {
7998 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Ensure the vtable exists (and surface type-load failures) first. */
7999 mono_class_vtable (cfg->domain, cmethod->klass);
8000 CHECK_TYPELOAD (cmethod->klass);
8002 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8003 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8006 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8007 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared path: the vtable can be embedded as a constant. */
8009 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8011 CHECK_TYPELOAD (cmethod->klass);
8012 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8017 /* Avoid virtual calls to ctors if possible */
8018 if (mono_class_is_marshalbyref (cmethod->klass))
8019 callvirt_this_arg = sp [0];
/* 1. Try a JIT intrinsic replacement for the whole ctor. */
8021 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8022 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8023 CHECK_CFG_EXCEPTION;
/* 2. Try inlining the ctor body (exception-derived ctors excluded). */
8024 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8025 mono_method_check_inlining (cfg, cmethod) &&
8026 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8029 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* Account for the 5-byte call opcode the inline replaced. */
8030 cfg->real_offset += 5;
8032 *inline_costs += costs - 5;
8034 INLINE_FAILURE ("inline failure");
8035 // FIXME-VT: Clean this up
8036 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8037 GSHAREDVT_FAILURE(*ip);
8038 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3. gsharedvt signature: call through an out trampoline address. */
8040 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8043 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8045 if (cfg->llvm_only) {
8046 // FIXME: Avoid initializing vtable_arg
8047 emit_llvmonly_calli (cfg, fsig, sp, addr);
8049 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4. Shared-generic context: indirect call via an rgctx-fetched address. */
8051 } else if (context_used &&
8052 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8053 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8054 MonoInst *cmethod_addr;
8056 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8058 if (cfg->llvm_only) {
8059 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8060 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8061 emit_llvmonly_calli (cfg, fsig, sp, addr);
8063 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8064 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8066 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5. Fallback: ordinary direct call to the ctor. */
8069 INLINE_FAILURE ("ctor call");
8070 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8071 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that moves VAL into the current method's return location.
 * Valuetype returns that need CEE_STOBJ semantics are stored either into the
 * ret variable or through the hidden vret address argument; soft-float R4
 * returns are converted through an icall; everything else goes through the
 * architecture's setret hook.
 */
8078 emit_setret (MonoCompile *cfg, MonoInst *val)
8080 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype returned by hidden pointer (STOBJ semantics). */
8083 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8086 if (!cfg->vret_addr) {
/* No vret argument: store into the dedicated return variable. */
8087 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Store through the caller-provided return-value address. */
8089 EMIT_NEW_RETLOADA (cfg, ret_addr);
8091 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8092 ins->klass = mono_class_from_mono_type (ret_type);
8095 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: an R4 return must be converted via the runtime helper. */
8096 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8097 MonoInst *iargs [1];
8101 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8102 mono_arch_emit_setret (cfg, cfg->method, conv);
8104 mono_arch_emit_setret (cfg, cfg->method, val);
8107 mono_arch_emit_setret (cfg, cfg->method, val);
8113 * mono_method_to_ir:
8115 * Translate the .net IL into linear IR.
8118 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8119 MonoInst *return_var, MonoInst **inline_args,
8120 guint inline_offset, gboolean is_virtual_call)
8123 MonoInst *ins, **sp, **stack_start;
8124 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8125 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8126 MonoMethod *cmethod, *method_definition;
8127 MonoInst **arg_array;
8128 MonoMethodHeader *header;
8130 guint32 token, ins_flag;
8132 MonoClass *constrained_class = NULL;
8133 unsigned char *ip, *end, *target, *err_pos;
8134 MonoMethodSignature *sig;
8135 MonoGenericContext *generic_context = NULL;
8136 MonoGenericContainer *generic_container = NULL;
8137 MonoType **param_types;
8138 int i, n, start_new_bblock, dreg;
8139 int num_calls = 0, inline_costs = 0;
8140 int breakpoint_id = 0;
8142 GSList *class_inits = NULL;
8143 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8145 gboolean init_locals, seq_points, skip_dead_blocks;
8146 gboolean sym_seq_points = FALSE;
8147 MonoDebugMethodInfo *minfo;
8148 MonoBitSet *seq_point_locs = NULL;
8149 MonoBitSet *seq_point_set_locs = NULL;
8151 cfg->disable_inline = is_jit_optimizer_disabled (method);
8153 /* serialization and xdomain stuff may need access to private fields and methods */
8154 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8155 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8156 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8157 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8158 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8159 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8161 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8162 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8163 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8164 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8165 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8167 image = method->klass->image;
8168 header = mono_method_get_header (method);
8170 if (mono_loader_get_last_error ()) {
8171 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8172 mono_error_set_from_loader_error (&cfg->error);
8174 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8176 goto exception_exit;
8178 generic_container = mono_method_get_generic_container (method);
8179 sig = mono_method_signature (method);
8180 num_args = sig->hasthis + sig->param_count;
8181 ip = (unsigned char*)header->code;
8182 cfg->cil_start = ip;
8183 end = ip + header->code_size;
8184 cfg->stat_cil_code_size += header->code_size;
8186 seq_points = cfg->gen_seq_points && cfg->method == method;
8188 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8189 /* We could hit a seq point before attaching to the JIT (#8338) */
8193 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8194 minfo = mono_debug_lookup_method (method);
8196 MonoSymSeqPoint *sps;
8197 int i, n_il_offsets;
8199 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8200 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8201 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8202 sym_seq_points = TRUE;
8203 for (i = 0; i < n_il_offsets; ++i) {
8204 if (sps [i].il_offset < header->code_size)
8205 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8208 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8209 /* Methods without line number info like auto-generated property accessors */
8210 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8211 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8212 sym_seq_points = TRUE;
8217 * Methods without init_locals set could cause asserts in various passes
8218 * (#497220). To work around this, we emit dummy initialization opcodes
8219 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8220 * on some platforms.
8222 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8223 init_locals = header->init_locals;
8227 method_definition = method;
8228 while (method_definition->is_inflated) {
8229 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8230 method_definition = imethod->declaring;
8233 /* SkipVerification is not allowed if core-clr is enabled */
8234 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8236 dont_verify_stloc = TRUE;
8239 if (sig->is_inflated)
8240 generic_context = mono_method_get_context (method);
8241 else if (generic_container)
8242 generic_context = &generic_container->context;
8243 cfg->generic_context = generic_context;
8246 g_assert (!sig->has_type_parameters);
8248 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8249 g_assert (method->is_inflated);
8250 g_assert (mono_method_get_context (method)->method_inst);
8252 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8253 g_assert (sig->generic_param_count);
8255 if (cfg->method == method) {
8256 cfg->real_offset = 0;
8258 cfg->real_offset = inline_offset;
8261 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8262 cfg->cil_offset_to_bb_len = header->code_size;
8264 cfg->current_method = method;
8266 if (cfg->verbose_level > 2)
8267 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8269 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8271 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8272 for (n = 0; n < sig->param_count; ++n)
8273 param_types [n + sig->hasthis] = sig->params [n];
8274 cfg->arg_types = param_types;
8276 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8277 if (cfg->method == method) {
8279 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8280 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8283 NEW_BBLOCK (cfg, start_bblock);
8284 cfg->bb_entry = start_bblock;
8285 start_bblock->cil_code = NULL;
8286 start_bblock->cil_length = 0;
8289 NEW_BBLOCK (cfg, end_bblock);
8290 cfg->bb_exit = end_bblock;
8291 end_bblock->cil_code = NULL;
8292 end_bblock->cil_length = 0;
8293 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8294 g_assert (cfg->num_bblocks == 2);
8296 arg_array = cfg->args;
8298 if (header->num_clauses) {
8299 cfg->spvars = g_hash_table_new (NULL, NULL);
8300 cfg->exvars = g_hash_table_new (NULL, NULL);
8302 /* handle exception clauses */
8303 for (i = 0; i < header->num_clauses; ++i) {
8304 MonoBasicBlock *try_bb;
8305 MonoExceptionClause *clause = &header->clauses [i];
8306 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8308 try_bb->real_offset = clause->try_offset;
8309 try_bb->try_start = TRUE;
8310 try_bb->region = ((i + 1) << 8) | clause->flags;
8311 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8312 tblock->real_offset = clause->handler_offset;
8313 tblock->flags |= BB_EXCEPTION_HANDLER;
8316 * Linking the try block with the EH block hinders inlining as we won't be able to
8317 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8319 if (COMPILE_LLVM (cfg))
8320 link_bblock (cfg, try_bb, tblock);
8322 if (*(ip + clause->handler_offset) == CEE_POP)
8323 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8325 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8326 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8327 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8328 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8329 MONO_ADD_INS (tblock, ins);
8331 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8332 /* finally clauses already have a seq point */
8333 /* seq points for filter clauses are emitted below */
8334 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8335 MONO_ADD_INS (tblock, ins);
8338 /* todo: is a fault block unsafe to optimize? */
8339 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8340 tblock->flags |= BB_EXCEPTION_UNSAFE;
8343 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8345 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8347 /* catch and filter blocks get the exception object on the stack */
8348 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8349 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8351 /* mostly like handle_stack_args (), but just sets the input args */
8352 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8353 tblock->in_scount = 1;
8354 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8355 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8359 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8360 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8361 if (!cfg->compile_llvm) {
8362 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8363 ins->dreg = tblock->in_stack [0]->dreg;
8364 MONO_ADD_INS (tblock, ins);
8367 MonoInst *dummy_use;
8370 * Add a dummy use for the exvar so its liveness info will be
8373 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8376 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8377 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8378 MONO_ADD_INS (tblock, ins);
8381 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8382 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8383 tblock->flags |= BB_EXCEPTION_HANDLER;
8384 tblock->real_offset = clause->data.filter_offset;
8385 tblock->in_scount = 1;
8386 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8387 /* The filter block shares the exvar with the handler block */
8388 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8389 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8390 MONO_ADD_INS (tblock, ins);
8394 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8395 clause->data.catch_class &&
8397 mono_class_check_context_used (clause->data.catch_class)) {
8399 * In shared generic code with catch
8400 * clauses containing type variables
8401 * the exception handling code has to
8402 * be able to get to the rgctx.
8403 * Therefore we have to make sure that
8404 * the vtable/mrgctx argument (for
8405 * static or generic methods) or the
8406 * "this" argument (for non-static
8407 * methods) are live.
8409 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8410 mini_method_get_context (method)->method_inst ||
8411 method->klass->valuetype) {
8412 mono_get_vtable_var (cfg);
8414 MonoInst *dummy_use;
8416 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8421 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8422 cfg->cbb = start_bblock;
8423 cfg->args = arg_array;
8424 mono_save_args (cfg, sig, inline_args);
8427 /* FIRST CODE BLOCK */
8428 NEW_BBLOCK (cfg, tblock);
8429 tblock->cil_code = ip;
8433 ADD_BBLOCK (cfg, tblock);
8435 if (cfg->method == method) {
8436 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8437 if (breakpoint_id) {
8438 MONO_INST_NEW (cfg, ins, OP_BREAK);
8439 MONO_ADD_INS (cfg->cbb, ins);
8443 /* we use a separate basic block for the initialization code */
8444 NEW_BBLOCK (cfg, init_localsbb);
8445 cfg->bb_init = init_localsbb;
8446 init_localsbb->real_offset = cfg->real_offset;
8447 start_bblock->next_bb = init_localsbb;
8448 init_localsbb->next_bb = cfg->cbb;
8449 link_bblock (cfg, start_bblock, init_localsbb);
8450 link_bblock (cfg, init_localsbb, cfg->cbb);
8452 cfg->cbb = init_localsbb;
8454 if (cfg->gsharedvt && cfg->method == method) {
8455 MonoGSharedVtMethodInfo *info;
8456 MonoInst *var, *locals_var;
8459 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8460 info->method = cfg->method;
8461 info->count_entries = 16;
8462 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8463 cfg->gsharedvt_info = info;
8465 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8466 /* prevent it from being register allocated */
8467 //var->flags |= MONO_INST_VOLATILE;
8468 cfg->gsharedvt_info_var = var;
8470 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8471 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8473 /* Allocate locals */
8474 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8475 /* prevent it from being register allocated */
8476 //locals_var->flags |= MONO_INST_VOLATILE;
8477 cfg->gsharedvt_locals_var = locals_var;
8479 dreg = alloc_ireg (cfg);
8480 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8482 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8483 ins->dreg = locals_var->dreg;
8485 MONO_ADD_INS (cfg->cbb, ins);
8486 cfg->gsharedvt_locals_var_ins = ins;
8488 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8491 ins->flags |= MONO_INST_INIT;
8495 if (mono_security_core_clr_enabled ()) {
8496 /* check if this is native code, e.g. an icall or a p/invoke */
8497 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8498 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8500 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8501 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8503 /* if this ia a native call then it can only be JITted from platform code */
8504 if ((icall || pinvk) && method->klass && method->klass->image) {
8505 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8506 MonoException *ex = icall ? mono_get_exception_security () :
8507 mono_get_exception_method_access ();
8508 emit_throw_exception (cfg, ex);
8515 CHECK_CFG_EXCEPTION;
8517 if (header->code_size == 0)
8520 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8525 if (cfg->method == method)
8526 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8528 for (n = 0; n < header->num_locals; ++n) {
8529 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8534 /* We force the vtable variable here for all shared methods
8535 for the possibility that they might show up in a stack
8536 trace where their exact instantiation is needed. */
8537 if (cfg->gshared && method == cfg->method) {
8538 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8539 mini_method_get_context (method)->method_inst ||
8540 method->klass->valuetype) {
8541 mono_get_vtable_var (cfg);
8543 /* FIXME: Is there a better way to do this?
8544 We need the variable live for the duration
8545 of the whole method. */
8546 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8550 /* add a check for this != NULL to inlined methods */
8551 if (is_virtual_call) {
8554 NEW_ARGLOAD (cfg, arg_ins, 0);
8555 MONO_ADD_INS (cfg->cbb, arg_ins);
8556 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8559 skip_dead_blocks = !dont_verify;
8560 if (skip_dead_blocks) {
8561 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8566 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8567 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8570 start_new_bblock = 0;
8572 if (cfg->method == method)
8573 cfg->real_offset = ip - header->code;
8575 cfg->real_offset = inline_offset;
8580 if (start_new_bblock) {
8581 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8582 if (start_new_bblock == 2) {
8583 g_assert (ip == tblock->cil_code);
8585 GET_BBLOCK (cfg, tblock, ip);
8587 cfg->cbb->next_bb = tblock;
8589 start_new_bblock = 0;
8590 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8591 if (cfg->verbose_level > 3)
8592 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8593 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8597 g_slist_free (class_inits);
8600 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8601 link_bblock (cfg, cfg->cbb, tblock);
8602 if (sp != stack_start) {
8603 handle_stack_args (cfg, stack_start, sp - stack_start);
8605 CHECK_UNVERIFIABLE (cfg);
8607 cfg->cbb->next_bb = tblock;
8609 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8610 if (cfg->verbose_level > 3)
8611 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8612 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8615 g_slist_free (class_inits);
8620 if (skip_dead_blocks) {
8621 int ip_offset = ip - header->code;
8623 if (ip_offset == bb->end)
8627 int op_size = mono_opcode_size (ip, end);
8628 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8630 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8632 if (ip_offset + op_size == bb->end) {
8633 MONO_INST_NEW (cfg, ins, OP_NOP);
8634 MONO_ADD_INS (cfg->cbb, ins);
8635 start_new_bblock = 1;
8643 * Sequence points are points where the debugger can place a breakpoint.
8644 * Currently, we generate these automatically at points where the IL
8647 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8649 * Make methods interruptible at the beginning, and at the targets of
8650 * backward branches.
8651 * Also, do this at the start of every bblock in methods with clauses too,
8652 * to be able to handle instructions with imprecise control flow like
8654 * Backward branches are handled at the end of method-to-ir ().
8656 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8657 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8659 /* Avoid sequence points on empty IL like .volatile */
8660 // FIXME: Enable this
8661 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8662 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8663 if ((sp != stack_start) && !sym_seq_point)
8664 ins->flags |= MONO_INST_NONEMPTY_STACK;
8665 MONO_ADD_INS (cfg->cbb, ins);
8668 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8671 cfg->cbb->real_offset = cfg->real_offset;
8673 if ((cfg->method == method) && cfg->coverage_info) {
8674 guint32 cil_offset = ip - header->code;
8675 cfg->coverage_info->data [cil_offset].cil_code = ip;
8677 /* TODO: Use an increment here */
8678 #if defined(TARGET_X86)
8679 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8680 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8682 MONO_ADD_INS (cfg->cbb, ins);
8684 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8685 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8689 if (cfg->verbose_level > 3)
8690 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8694 if (seq_points && !sym_seq_points && sp != stack_start) {
8696 * The C# compiler uses these nops to notify the JIT that it should
8697 * insert seq points.
8699 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8700 MONO_ADD_INS (cfg->cbb, ins);
8702 if (cfg->keep_cil_nops)
8703 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8705 MONO_INST_NEW (cfg, ins, OP_NOP);
8707 MONO_ADD_INS (cfg->cbb, ins);
8710 if (should_insert_brekpoint (cfg->method)) {
8711 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8713 MONO_INST_NEW (cfg, ins, OP_NOP);
8716 MONO_ADD_INS (cfg->cbb, ins);
8722 CHECK_STACK_OVF (1);
8723 n = (*ip)-CEE_LDARG_0;
8725 EMIT_NEW_ARGLOAD (cfg, ins, n);
8733 CHECK_STACK_OVF (1);
8734 n = (*ip)-CEE_LDLOC_0;
8736 EMIT_NEW_LOCLOAD (cfg, ins, n);
8745 n = (*ip)-CEE_STLOC_0;
8748 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8750 emit_stloc_ir (cfg, sp, header, n);
8757 CHECK_STACK_OVF (1);
8760 EMIT_NEW_ARGLOAD (cfg, ins, n);
8766 CHECK_STACK_OVF (1);
8769 NEW_ARGLOADA (cfg, ins, n);
8770 MONO_ADD_INS (cfg->cbb, ins);
8780 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8782 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8787 CHECK_STACK_OVF (1);
8790 EMIT_NEW_LOCLOAD (cfg, ins, n);
8794 case CEE_LDLOCA_S: {
8795 unsigned char *tmp_ip;
8797 CHECK_STACK_OVF (1);
8798 CHECK_LOCAL (ip [1]);
8800 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8806 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8815 CHECK_LOCAL (ip [1]);
8816 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8818 emit_stloc_ir (cfg, sp, header, ip [1]);
8823 CHECK_STACK_OVF (1);
8824 EMIT_NEW_PCONST (cfg, ins, NULL);
8825 ins->type = STACK_OBJ;
8830 CHECK_STACK_OVF (1);
8831 EMIT_NEW_ICONST (cfg, ins, -1);
8844 CHECK_STACK_OVF (1);
8845 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8851 CHECK_STACK_OVF (1);
8853 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8859 CHECK_STACK_OVF (1);
8860 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8866 CHECK_STACK_OVF (1);
8867 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8868 ins->type = STACK_I8;
8869 ins->dreg = alloc_dreg (cfg, STACK_I8);
8871 ins->inst_l = (gint64)read64 (ip);
8872 MONO_ADD_INS (cfg->cbb, ins);
8878 gboolean use_aotconst = FALSE;
8880 #ifdef TARGET_POWERPC
8881 /* FIXME: Clean this up */
8882 if (cfg->compile_aot)
8883 use_aotconst = TRUE;
8886 /* FIXME: we should really allocate this only late in the compilation process */
8887 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8889 CHECK_STACK_OVF (1);
8895 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8897 dreg = alloc_freg (cfg);
8898 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8899 ins->type = cfg->r4_stack_type;
8901 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8902 ins->type = cfg->r4_stack_type;
8903 ins->dreg = alloc_dreg (cfg, STACK_R8);
8905 MONO_ADD_INS (cfg->cbb, ins);
8915 gboolean use_aotconst = FALSE;
8917 #ifdef TARGET_POWERPC
8918 /* FIXME: Clean this up */
8919 if (cfg->compile_aot)
8920 use_aotconst = TRUE;
8923 /* FIXME: we should really allocate this only late in the compilation process */
8924 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8926 CHECK_STACK_OVF (1);
8932 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8934 dreg = alloc_freg (cfg);
8935 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8936 ins->type = STACK_R8;
8938 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8939 ins->type = STACK_R8;
8940 ins->dreg = alloc_dreg (cfg, STACK_R8);
8942 MONO_ADD_INS (cfg->cbb, ins);
8951 MonoInst *temp, *store;
8953 CHECK_STACK_OVF (1);
8957 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8958 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8960 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8963 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8976 if (sp [0]->type == STACK_R8)
8977 /* we need to pop the value from the x86 FP stack */
8978 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8983 MonoMethodSignature *fsig;
8986 INLINE_FAILURE ("jmp");
8987 GSHAREDVT_FAILURE (*ip);
8990 if (stack_start != sp)
8992 token = read32 (ip + 1);
8993 /* FIXME: check the signature matches */
8994 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8997 if (cfg->gshared && mono_method_check_context_used (cmethod))
8998 GENERIC_SHARING_FAILURE (CEE_JMP);
9000 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9002 fsig = mono_method_signature (cmethod);
9003 n = fsig->param_count + fsig->hasthis;
9004 if (cfg->llvm_only) {
9007 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9008 for (i = 0; i < n; ++i)
9009 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9010 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9012 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9013 * have to emit a normal return since llvm expects it.
9016 emit_setret (cfg, ins);
9017 MONO_INST_NEW (cfg, ins, OP_BR);
9018 ins->inst_target_bb = end_bblock;
9019 MONO_ADD_INS (cfg->cbb, ins);
9020 link_bblock (cfg, cfg->cbb, end_bblock);
9023 } else if (cfg->backend->have_op_tail_call) {
9024 /* Handle tail calls similarly to calls */
9027 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9028 call->method = cmethod;
9029 call->tail_call = TRUE;
9030 call->signature = mono_method_signature (cmethod);
9031 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9032 call->inst.inst_p0 = cmethod;
9033 for (i = 0; i < n; ++i)
9034 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9036 mono_arch_emit_call (cfg, call);
9037 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9038 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9040 for (i = 0; i < num_args; ++i)
9041 /* Prevent arguments from being optimized away */
9042 arg_array [i]->flags |= MONO_INST_VOLATILE;
9044 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9045 ins = (MonoInst*)call;
9046 ins->inst_p0 = cmethod;
9047 MONO_ADD_INS (cfg->cbb, ins);
9051 start_new_bblock = 1;
9056 MonoMethodSignature *fsig;
9059 token = read32 (ip + 1);
9063 //GSHAREDVT_FAILURE (*ip);
9068 fsig = mini_get_signature (method, token, generic_context);
9070 if (method->dynamic && fsig->pinvoke) {
9074 * This is a call through a function pointer using a pinvoke
9075 * signature. Have to create a wrapper and call that instead.
9076 * FIXME: This is very slow, need to create a wrapper at JIT time
9077 * instead based on the signature.
9079 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9080 EMIT_NEW_PCONST (cfg, args [1], fsig);
9082 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9085 n = fsig->param_count + fsig->hasthis;
9089 //g_assert (!virtual_ || fsig->hasthis);
9093 inline_costs += 10 * num_calls++;
9096 * Making generic calls out of gsharedvt methods.
9097 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9098 * patching gshared method addresses into a gsharedvt method.
9100 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9102 * We pass the address to the gsharedvt trampoline in the rgctx reg
9104 MonoInst *callee = addr;
9106 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9108 GSHAREDVT_FAILURE (*ip);
9112 GSHAREDVT_FAILURE (*ip);
9114 addr = emit_get_rgctx_sig (cfg, context_used,
9115 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9116 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9120 /* Prevent inlining of methods with indirect calls */
9121 INLINE_FAILURE ("indirect call");
9123 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9124 MonoJumpInfoType info_type;
9128 * Instead of emitting an indirect call, emit a direct call
9129 * with the contents of the aotconst as the patch info.
9131 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9132 info_type = (MonoJumpInfoType)addr->inst_c1;
9133 info_data = addr->inst_p0;
9135 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9136 info_data = addr->inst_right->inst_left;
9139 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9140 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9145 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9149 /* End of call, INS should contain the result of the call, if any */
9151 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9153 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9156 CHECK_CFG_EXCEPTION;
9160 constrained_class = NULL;
9164 case CEE_CALLVIRT: {
9165 MonoInst *addr = NULL;
9166 MonoMethodSignature *fsig = NULL;
9168 int virtual_ = *ip == CEE_CALLVIRT;
9169 gboolean pass_imt_from_rgctx = FALSE;
9170 MonoInst *imt_arg = NULL;
9171 MonoInst *keep_this_alive = NULL;
9172 gboolean pass_vtable = FALSE;
9173 gboolean pass_mrgctx = FALSE;
9174 MonoInst *vtable_arg = NULL;
9175 gboolean check_this = FALSE;
9176 gboolean supported_tail_call = FALSE;
9177 gboolean tail_call = FALSE;
9178 gboolean need_seq_point = FALSE;
9179 guint32 call_opcode = *ip;
9180 gboolean emit_widen = TRUE;
9181 gboolean push_res = TRUE;
9182 gboolean skip_ret = FALSE;
9183 gboolean delegate_invoke = FALSE;
9184 gboolean direct_icall = FALSE;
9185 gboolean constrained_partial_call = FALSE;
9186 MonoMethod *cil_method;
9189 token = read32 (ip + 1);
9193 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9196 cil_method = cmethod;
9198 if (constrained_class) {
9199 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9200 if (!mini_is_gsharedvt_klass (constrained_class)) {
9201 g_assert (!cmethod->klass->valuetype);
9202 if (!mini_type_is_reference (&constrained_class->byval_arg))
9203 constrained_partial_call = TRUE;
9207 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9208 if (cfg->verbose_level > 2)
9209 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9210 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9211 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9213 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9217 if (cfg->verbose_level > 2)
9218 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9220 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9222 * This is needed since get_method_constrained can't find
9223 * the method in klass representing a type var.
9224 * The type var is guaranteed to be a reference type in this
9227 if (!mini_is_gsharedvt_klass (constrained_class))
9228 g_assert (!cmethod->klass->valuetype);
9230 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9236 if (!cmethod || mono_loader_get_last_error ()) {
9237 if (mono_loader_get_last_error ()) {
9238 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9239 mono_error_set_from_loader_error (&cfg->error);
9245 if (!dont_verify && !cfg->skip_visibility) {
9246 MonoMethod *target_method = cil_method;
9247 if (method->is_inflated) {
9248 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9251 if (!mono_method_can_access_method (method_definition, target_method) &&
9252 !mono_method_can_access_method (method, cil_method))
9253 METHOD_ACCESS_FAILURE (method, cil_method);
9256 if (mono_security_core_clr_enabled ())
9257 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9259 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9260 /* MS.NET seems to silently convert this to a callvirt */
9265 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9266 * converts to a callvirt.
9268 * tests/bug-515884.il is an example of this behavior
9270 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9271 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9272 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9276 if (!cmethod->klass->inited)
9277 if (!mono_class_init (cmethod->klass))
9278 TYPE_LOAD_ERROR (cmethod->klass);
9280 fsig = mono_method_signature (cmethod);
9283 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9284 mini_class_is_system_array (cmethod->klass)) {
9285 array_rank = cmethod->klass->rank;
9286 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9287 direct_icall = TRUE;
9288 } else if (fsig->pinvoke) {
9289 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9290 fsig = mono_method_signature (wrapper);
9291 } else if (constrained_class) {
9293 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9297 if (cfg->llvm_only && !cfg->method->wrapper_type)
9298 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9300 /* See code below */
9301 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9302 MonoBasicBlock *tbb;
9304 GET_BBLOCK (cfg, tbb, ip + 5);
9305 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9307 * We want to extend the try block to cover the call, but we can't do it if the
9308 * call is made directly since it's followed by an exception check.
9310 direct_icall = FALSE;
9314 mono_save_token_info (cfg, image, token, cil_method);
9316 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9317 need_seq_point = TRUE;
9319 /* Don't support calls made using type arguments for now */
9321 if (cfg->gsharedvt) {
9322 if (mini_is_gsharedvt_signature (fsig))
9323 GSHAREDVT_FAILURE (*ip);
9327 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9328 g_assert_not_reached ();
9330 n = fsig->param_count + fsig->hasthis;
9332 if (!cfg->gshared && cmethod->klass->generic_container)
9336 g_assert (!mono_method_check_context_used (cmethod));
9340 //g_assert (!virtual_ || fsig->hasthis);
9345 * We have the `constrained.' prefix opcode.
9347 if (constrained_class) {
9348 if (mini_is_gsharedvt_klass (constrained_class)) {
9349 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9350 /* The 'Own method' case below */
9351 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9352 /* 'The type parameter is instantiated as a reference type' case below. */
9354 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9355 CHECK_CFG_EXCEPTION;
9361 if (constrained_partial_call) {
9362 gboolean need_box = TRUE;
9365 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9366 * called method is not known at compile time either. The called method could end up being
9367 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9368 * to box the receiver.
9369 * A simple solution would be to box always and make a normal virtual call, but that would
9370 * be bad performance wise.
9372 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9374 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9379 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9380 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9381 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9382 ins->klass = constrained_class;
9383 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9384 CHECK_CFG_EXCEPTION;
9385 } else if (need_box) {
9387 MonoBasicBlock *is_ref_bb, *end_bb;
9388 MonoInst *nonbox_call;
9391 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9393 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9394 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9396 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9398 NEW_BBLOCK (cfg, is_ref_bb);
9399 NEW_BBLOCK (cfg, end_bb);
9401 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9406 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9408 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9411 MONO_START_BB (cfg, is_ref_bb);
9412 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9413 ins->klass = constrained_class;
9414 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9415 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9417 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9419 MONO_START_BB (cfg, end_bb);
9422 nonbox_call->dreg = ins->dreg;
9425 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9426 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9427 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9430 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9432 * The type parameter is instantiated as a valuetype,
9433 * but that type doesn't override the method we're
9434 * calling, so we need to box `this'.
9436 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9437 ins->klass = constrained_class;
9438 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9439 CHECK_CFG_EXCEPTION;
9440 } else if (!constrained_class->valuetype) {
9441 int dreg = alloc_ireg_ref (cfg);
9444 * The type parameter is instantiated as a reference
9445 * type. We have a managed pointer on the stack, so
9446 * we need to dereference it here.
9448 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9449 ins->type = STACK_OBJ;
9452 if (cmethod->klass->valuetype) {
9455 /* Interface method */
9458 mono_class_setup_vtable (constrained_class);
9459 CHECK_TYPELOAD (constrained_class);
9460 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9462 TYPE_LOAD_ERROR (constrained_class);
9463 slot = mono_method_get_vtable_slot (cmethod);
9465 TYPE_LOAD_ERROR (cmethod->klass);
9466 cmethod = constrained_class->vtable [ioffset + slot];
9468 if (cmethod->klass == mono_defaults.enum_class) {
9469 /* Enum implements some interfaces, so treat this as the first case */
9470 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9471 ins->klass = constrained_class;
9472 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9473 CHECK_CFG_EXCEPTION;
9478 constrained_class = NULL;
9481 if (check_call_signature (cfg, fsig, sp))
9484 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9485 delegate_invoke = TRUE;
9487 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9488 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9489 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9497 * If the callee is a shared method, then its static cctor
9498 * might not get called after the call was patched.
9500 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9501 emit_class_init (cfg, cmethod->klass);
9502 CHECK_TYPELOAD (cmethod->klass);
9505 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9508 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9510 context_used = mini_method_check_context_used (cfg, cmethod);
9512 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9513 /* Generic method interface
9514 calls are resolved via a
9515 helper function and don't
9517 if (!cmethod_context || !cmethod_context->method_inst)
9518 pass_imt_from_rgctx = TRUE;
9522 * If a shared method calls another
9523 * shared method then the caller must
9524 * have a generic sharing context
9525 * because the magic trampoline
9526 * requires it. FIXME: We shouldn't
9527 * have to force the vtable/mrgctx
9528 * variable here. Instead there
9529 * should be a flag in the cfg to
9530 * request a generic sharing context.
9533 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9534 mono_get_vtable_var (cfg);
9539 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9541 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9543 CHECK_TYPELOAD (cmethod->klass);
9544 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9549 g_assert (!vtable_arg);
9551 if (!cfg->compile_aot) {
9553 * emit_get_rgctx_method () calls mono_class_vtable () so check
9554 * for type load errors before.
9556 mono_class_setup_vtable (cmethod->klass);
9557 CHECK_TYPELOAD (cmethod->klass);
9560 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9562 /* !marshalbyref is needed to properly handle generic methods + remoting */
9563 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9564 MONO_METHOD_IS_FINAL (cmethod)) &&
9565 !mono_class_is_marshalbyref (cmethod->klass)) {
9572 if (pass_imt_from_rgctx) {
9573 g_assert (!pass_vtable);
9575 imt_arg = emit_get_rgctx_method (cfg, context_used,
9576 cmethod, MONO_RGCTX_INFO_METHOD);
9580 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9582 /* Calling virtual generic methods */
9583 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9584 !(MONO_METHOD_IS_FINAL (cmethod) &&
9585 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9586 fsig->generic_param_count &&
9587 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9589 MonoInst *this_temp, *this_arg_temp, *store;
9590 MonoInst *iargs [4];
9592 g_assert (fsig->is_inflated);
9594 /* Prevent inlining of methods that contain indirect calls */
9595 INLINE_FAILURE ("virtual generic call");
9597 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9598 GSHAREDVT_FAILURE (*ip);
9600 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9601 g_assert (!imt_arg);
9603 g_assert (cmethod->is_inflated);
9604 imt_arg = emit_get_rgctx_method (cfg, context_used,
9605 cmethod, MONO_RGCTX_INFO_METHOD);
9606 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9608 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9609 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9610 MONO_ADD_INS (cfg->cbb, store);
9612 /* FIXME: This should be a managed pointer */
9613 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9615 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9616 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9617 cmethod, MONO_RGCTX_INFO_METHOD);
9618 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9619 addr = mono_emit_jit_icall (cfg,
9620 mono_helper_compile_generic_method, iargs);
9622 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9624 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9631 * Implement a workaround for the inherent races involved in locking:
9637 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9638 * try block, the Exit () won't be executed, see:
9639 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9640 * To work around this, we extend such try blocks to include the last x bytes
9641 * of the Monitor.Enter () call.
9643 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9644 MonoBasicBlock *tbb;
9646 GET_BBLOCK (cfg, tbb, ip + 5);
9648 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9649 * from Monitor.Enter like ArgumentNullException.
9651 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9652 /* Mark this bblock as needing to be extended */
9653 tbb->extend_try_block = TRUE;
9657 /* Conversion to a JIT intrinsic */
9658 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9659 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9660 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9667 if ((cfg->opt & MONO_OPT_INLINE) &&
9668 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9669 mono_method_check_inlining (cfg, cmethod)) {
9671 gboolean always = FALSE;
9673 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9674 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9675 /* Prevent inlining of methods that call wrappers */
9676 INLINE_FAILURE ("wrapper call");
9677 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9681 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9683 cfg->real_offset += 5;
9685 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9686 /* *sp is already set by inline_method */
9691 inline_costs += costs;
9697 /* Tail recursion elimination */
9698 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9699 gboolean has_vtargs = FALSE;
9702 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9703 INLINE_FAILURE ("tail call");
9705 /* keep it simple */
9706 for (i = fsig->param_count - 1; i >= 0; i--) {
9707 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9712 for (i = 0; i < n; ++i)
9713 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9714 MONO_INST_NEW (cfg, ins, OP_BR);
9715 MONO_ADD_INS (cfg->cbb, ins);
9716 tblock = start_bblock->out_bb [0];
9717 link_bblock (cfg, cfg->cbb, tblock);
9718 ins->inst_target_bb = tblock;
9719 start_new_bblock = 1;
9721 /* skip the CEE_RET, too */
9722 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9729 inline_costs += 10 * num_calls++;
9732 * Making generic calls out of gsharedvt methods.
9733 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9734 * patching gshared method addresses into a gsharedvt method.
9736 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9737 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9738 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9739 MonoRgctxInfoType info_type;
9742 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9743 //GSHAREDVT_FAILURE (*ip);
9744 // disable for possible remoting calls
9745 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9746 GSHAREDVT_FAILURE (*ip);
9747 if (fsig->generic_param_count) {
9748 /* virtual generic call */
9749 g_assert (!imt_arg);
9750 /* Same as the virtual generic case above */
9751 imt_arg = emit_get_rgctx_method (cfg, context_used,
9752 cmethod, MONO_RGCTX_INFO_METHOD);
9753 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9755 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9756 /* This can happen when we call a fully instantiated iface method */
9757 imt_arg = emit_get_rgctx_method (cfg, context_used,
9758 cmethod, MONO_RGCTX_INFO_METHOD);
9763 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9764 keep_this_alive = sp [0];
9766 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9767 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9769 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9770 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9772 if (cfg->llvm_only) {
9773 // FIXME: Avoid initializing vtable_arg
9774 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9776 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9781 /* Generic sharing */
9784 * Use this if the callee is gsharedvt sharable too, since
9785 * at runtime we might find an instantiation so the call cannot
9786 * be patched (the 'no_patch' code path in mini-trampolines.c).
9788 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9789 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9790 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9791 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9792 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9793 INLINE_FAILURE ("gshared");
9795 g_assert (cfg->gshared && cmethod);
9799 * We are compiling a call to a
9800 * generic method from shared code,
9801 * which means that we have to look up
9802 * the method in the rgctx and do an
9806 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9808 if (cfg->llvm_only) {
9809 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9810 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9812 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9813 // FIXME: Avoid initializing imt_arg/vtable_arg
9814 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9816 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9817 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9822 /* Direct calls to icalls */
9824 MonoMethod *wrapper;
9827 /* Inline the wrapper */
9828 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9830 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9831 g_assert (costs > 0);
9832 cfg->real_offset += 5;
9834 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9835 /* *sp is already set by inline_method */
9840 inline_costs += costs;
9849 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9850 MonoInst *val = sp [fsig->param_count];
9852 if (val->type == STACK_OBJ) {
9853 MonoInst *iargs [2];
9858 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9861 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9862 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9863 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9864 emit_write_barrier (cfg, addr, val);
9865 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9866 GSHAREDVT_FAILURE (*ip);
9867 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9868 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9870 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9871 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9872 if (!cmethod->klass->element_class->valuetype && !readonly)
9873 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9874 CHECK_TYPELOAD (cmethod->klass);
9877 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9880 g_assert_not_reached ();
9887 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9891 /* Tail prefix / tail call optimization */
9893 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9894 /* FIXME: runtime generic context pointer for jumps? */
9895 /* FIXME: handle this for generic sharing eventually */
9896 if ((ins_flag & MONO_INST_TAILCALL) &&
9897 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9898 supported_tail_call = TRUE;
9900 if (supported_tail_call) {
9903 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9904 INLINE_FAILURE ("tail call");
9906 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9908 if (cfg->backend->have_op_tail_call) {
9909 /* Handle tail calls similarly to normal calls */
9912 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9914 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9915 call->tail_call = TRUE;
9916 call->method = cmethod;
9917 call->signature = mono_method_signature (cmethod);
9920 * We implement tail calls by storing the actual arguments into the
9921 * argument variables, then emitting a CEE_JMP.
9923 for (i = 0; i < n; ++i) {
9924 /* Prevent argument from being register allocated */
9925 arg_array [i]->flags |= MONO_INST_VOLATILE;
9926 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9928 ins = (MonoInst*)call;
9929 ins->inst_p0 = cmethod;
9930 ins->inst_p1 = arg_array [0];
9931 MONO_ADD_INS (cfg->cbb, ins);
9932 link_bblock (cfg, cfg->cbb, end_bblock);
9933 start_new_bblock = 1;
9935 // FIXME: Eliminate unreachable epilogs
9938 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9939 * only reachable from this call.
9941 GET_BBLOCK (cfg, tblock, ip + 5);
9942 if (tblock == cfg->cbb || tblock->in_count == 0)
9951 * Synchronized wrappers.
9952 * It's hard to determine where to replace a method with its synchronized
9953 * wrapper without causing an infinite recursion. The current solution is
9954 * to add the synchronized wrapper in the trampolines, and to
9955 * change the called method to a dummy wrapper, and resolve that wrapper
9956 * to the real method in mono_jit_compile_method ().
9958 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9959 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9960 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9961 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9965 * Virtual calls in llvm-only mode.
9967 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9968 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9973 INLINE_FAILURE ("call");
9974 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9975 imt_arg, vtable_arg);
9977 if (tail_call && !cfg->llvm_only) {
9978 link_bblock (cfg, cfg->cbb, end_bblock);
9979 start_new_bblock = 1;
9981 // FIXME: Eliminate unreachable epilogs
9984 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9985 * only reachable from this call.
9987 GET_BBLOCK (cfg, tblock, ip + 5);
9988 if (tblock == cfg->cbb || tblock->in_count == 0)
9995 /* End of call, INS should contain the result of the call, if any */
9997 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10000 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10005 if (keep_this_alive) {
10006 MonoInst *dummy_use;
10008 /* See mono_emit_method_call_full () */
10009 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10012 CHECK_CFG_EXCEPTION;
10016 g_assert (*ip == CEE_RET);
10020 constrained_class = NULL;
10021 if (need_seq_point)
10022 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10026 if (cfg->method != method) {
10027 /* return from inlined method */
10029 * If in_count == 0, that means the ret is unreachable due to
10030 * being preceded by a throw. In that case, inline_method () will
10031 * handle setting the return value
10032 * (test case: test_0_inline_throw ()).
10034 if (return_var && cfg->cbb->in_count) {
10035 MonoType *ret_type = mono_method_signature (method)->ret;
10041 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10044 //g_assert (returnvar != -1);
10045 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10046 cfg->ret_var_set = TRUE;
10049 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10051 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10052 emit_pop_lmf (cfg);
10055 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10057 if (seq_points && !sym_seq_points) {
10059 * Place a seq point here too even though the IL stack is not
10060 * empty, so a step over on
10063 * will work correctly.
10065 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10066 MONO_ADD_INS (cfg->cbb, ins);
10069 g_assert (!return_var);
10073 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10076 emit_setret (cfg, *sp);
10079 if (sp != stack_start)
10081 MONO_INST_NEW (cfg, ins, OP_BR);
10083 ins->inst_target_bb = end_bblock;
10084 MONO_ADD_INS (cfg->cbb, ins);
10085 link_bblock (cfg, cfg->cbb, end_bblock);
10086 start_new_bblock = 1;
10090 MONO_INST_NEW (cfg, ins, OP_BR);
10092 target = ip + 1 + (signed char)(*ip);
10094 GET_BBLOCK (cfg, tblock, target);
10095 link_bblock (cfg, cfg->cbb, tblock);
10096 ins->inst_target_bb = tblock;
10097 if (sp != stack_start) {
10098 handle_stack_args (cfg, stack_start, sp - stack_start);
10100 CHECK_UNVERIFIABLE (cfg);
10102 MONO_ADD_INS (cfg->cbb, ins);
10103 start_new_bblock = 1;
10104 inline_costs += BRANCH_COST;
10118 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10120 target = ip + 1 + *(signed char*)ip;
10123 ADD_BINCOND (NULL);
10126 inline_costs += BRANCH_COST;
10130 MONO_INST_NEW (cfg, ins, OP_BR);
10133 target = ip + 4 + (gint32)read32(ip);
10135 GET_BBLOCK (cfg, tblock, target);
10136 link_bblock (cfg, cfg->cbb, tblock);
10137 ins->inst_target_bb = tblock;
10138 if (sp != stack_start) {
10139 handle_stack_args (cfg, stack_start, sp - stack_start);
10141 CHECK_UNVERIFIABLE (cfg);
10144 MONO_ADD_INS (cfg->cbb, ins);
10146 start_new_bblock = 1;
10147 inline_costs += BRANCH_COST;
10149 case CEE_BRFALSE_S:
10154 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10155 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10156 guint32 opsize = is_short ? 1 : 4;
10158 CHECK_OPSIZE (opsize);
10160 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10163 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10168 GET_BBLOCK (cfg, tblock, target);
10169 link_bblock (cfg, cfg->cbb, tblock);
10170 GET_BBLOCK (cfg, tblock, ip);
10171 link_bblock (cfg, cfg->cbb, tblock);
10173 if (sp != stack_start) {
10174 handle_stack_args (cfg, stack_start, sp - stack_start);
10175 CHECK_UNVERIFIABLE (cfg);
10178 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10179 cmp->sreg1 = sp [0]->dreg;
10180 type_from_op (cfg, cmp, sp [0], NULL);
10183 #if SIZEOF_REGISTER == 4
10184 if (cmp->opcode == OP_LCOMPARE_IMM) {
10185 /* Convert it to OP_LCOMPARE */
10186 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10187 ins->type = STACK_I8;
10188 ins->dreg = alloc_dreg (cfg, STACK_I8);
10190 MONO_ADD_INS (cfg->cbb, ins);
10191 cmp->opcode = OP_LCOMPARE;
10192 cmp->sreg2 = ins->dreg;
10195 MONO_ADD_INS (cfg->cbb, cmp);
10197 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10198 type_from_op (cfg, ins, sp [0], NULL);
10199 MONO_ADD_INS (cfg->cbb, ins);
10200 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10201 GET_BBLOCK (cfg, tblock, target);
10202 ins->inst_true_bb = tblock;
10203 GET_BBLOCK (cfg, tblock, ip);
10204 ins->inst_false_bb = tblock;
10205 start_new_bblock = 2;
10208 inline_costs += BRANCH_COST;
10223 MONO_INST_NEW (cfg, ins, *ip);
10225 target = ip + 4 + (gint32)read32(ip);
10228 ADD_BINCOND (NULL);
10231 inline_costs += BRANCH_COST;
10235 MonoBasicBlock **targets;
10236 MonoBasicBlock *default_bblock;
10237 MonoJumpInfoBBTable *table;
10238 int offset_reg = alloc_preg (cfg);
10239 int target_reg = alloc_preg (cfg);
10240 int table_reg = alloc_preg (cfg);
10241 int sum_reg = alloc_preg (cfg);
10242 gboolean use_op_switch;
10246 n = read32 (ip + 1);
10249 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10253 CHECK_OPSIZE (n * sizeof (guint32));
10254 target = ip + n * sizeof (guint32);
10256 GET_BBLOCK (cfg, default_bblock, target);
10257 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10259 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10260 for (i = 0; i < n; ++i) {
10261 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10262 targets [i] = tblock;
10263 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10267 if (sp != stack_start) {
10269 * Link the current bb with the targets as well, so handle_stack_args
10270 * will set their in_stack correctly.
10272 link_bblock (cfg, cfg->cbb, default_bblock);
10273 for (i = 0; i < n; ++i)
10274 link_bblock (cfg, cfg->cbb, targets [i]);
10276 handle_stack_args (cfg, stack_start, sp - stack_start);
10278 CHECK_UNVERIFIABLE (cfg);
10280 /* Undo the links */
10281 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10282 for (i = 0; i < n; ++i)
10283 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10289 for (i = 0; i < n; ++i)
10290 link_bblock (cfg, cfg->cbb, targets [i]);
10292 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10293 table->table = targets;
10294 table->table_size = n;
10296 use_op_switch = FALSE;
10298 /* ARM implements SWITCH statements differently */
10299 /* FIXME: Make it use the generic implementation */
10300 if (!cfg->compile_aot)
10301 use_op_switch = TRUE;
10304 if (COMPILE_LLVM (cfg))
10305 use_op_switch = TRUE;
10307 cfg->cbb->has_jump_table = 1;
10309 if (use_op_switch) {
10310 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10311 ins->sreg1 = src1->dreg;
10312 ins->inst_p0 = table;
10313 ins->inst_many_bb = targets;
10314 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10315 MONO_ADD_INS (cfg->cbb, ins);
10317 if (sizeof (gpointer) == 8)
10318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10322 #if SIZEOF_REGISTER == 8
10323 /* The upper word might not be zero, and we add it to a 64 bit address later */
10324 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10327 if (cfg->compile_aot) {
10328 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10330 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10331 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10332 ins->inst_p0 = table;
10333 ins->dreg = table_reg;
10334 MONO_ADD_INS (cfg->cbb, ins);
10337 /* FIXME: Use load_memindex */
10338 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10340 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10342 start_new_bblock = 1;
10343 inline_costs += (BRANCH_COST * 2);
10356 case CEE_LDIND_REF:
10363 dreg = alloc_freg (cfg);
10366 dreg = alloc_lreg (cfg);
10368 case CEE_LDIND_REF:
10369 dreg = alloc_ireg_ref (cfg);
10372 dreg = alloc_preg (cfg);
10375 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10376 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10377 if (*ip == CEE_LDIND_R4)
10378 ins->type = cfg->r4_stack_type;
10379 ins->flags |= ins_flag;
10380 MONO_ADD_INS (cfg->cbb, ins);
10382 if (ins_flag & MONO_INST_VOLATILE) {
10383 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10384 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10389 case CEE_STIND_REF:
10400 if (ins_flag & MONO_INST_VOLATILE) {
10401 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10402 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10405 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10406 ins->flags |= ins_flag;
10409 MONO_ADD_INS (cfg->cbb, ins);
10411 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10412 emit_write_barrier (cfg, sp [0], sp [1]);
10421 MONO_INST_NEW (cfg, ins, (*ip));
10423 ins->sreg1 = sp [0]->dreg;
10424 ins->sreg2 = sp [1]->dreg;
10425 type_from_op (cfg, ins, sp [0], sp [1]);
10427 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10429 /* Use the immediate opcodes if possible */
10430 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10431 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10432 if (imm_opcode != -1) {
10433 ins->opcode = imm_opcode;
10434 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10437 NULLIFY_INS (sp [1]);
10441 MONO_ADD_INS ((cfg)->cbb, (ins));
10443 *sp++ = mono_decompose_opcode (cfg, ins);
10460 MONO_INST_NEW (cfg, ins, (*ip));
10462 ins->sreg1 = sp [0]->dreg;
10463 ins->sreg2 = sp [1]->dreg;
10464 type_from_op (cfg, ins, sp [0], sp [1]);
10466 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10467 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10469 /* FIXME: Pass opcode to is_inst_imm */
10471 /* Use the immediate opcodes if possible */
10472 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10473 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10474 if (imm_opcode != -1) {
10475 ins->opcode = imm_opcode;
10476 if (sp [1]->opcode == OP_I8CONST) {
10477 #if SIZEOF_REGISTER == 8
10478 ins->inst_imm = sp [1]->inst_l;
10480 ins->inst_ls_word = sp [1]->inst_ls_word;
10481 ins->inst_ms_word = sp [1]->inst_ms_word;
10485 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10488 /* Might be followed by an instruction added by add_widen_op */
10489 if (sp [1]->next == NULL)
10490 NULLIFY_INS (sp [1]);
10493 MONO_ADD_INS ((cfg)->cbb, (ins));
10495 *sp++ = mono_decompose_opcode (cfg, ins);
10508 case CEE_CONV_OVF_I8:
10509 case CEE_CONV_OVF_U8:
10510 case CEE_CONV_R_UN:
10513 /* Special case this earlier so we have long constants in the IR */
10514 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10515 int data = sp [-1]->inst_c0;
10516 sp [-1]->opcode = OP_I8CONST;
10517 sp [-1]->type = STACK_I8;
10518 #if SIZEOF_REGISTER == 8
10519 if ((*ip) == CEE_CONV_U8)
10520 sp [-1]->inst_c0 = (guint32)data;
10522 sp [-1]->inst_c0 = data;
10524 sp [-1]->inst_ls_word = data;
10525 if ((*ip) == CEE_CONV_U8)
10526 sp [-1]->inst_ms_word = 0;
10528 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10530 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10537 case CEE_CONV_OVF_I4:
10538 case CEE_CONV_OVF_I1:
10539 case CEE_CONV_OVF_I2:
10540 case CEE_CONV_OVF_I:
10541 case CEE_CONV_OVF_U:
10544 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10545 ADD_UNOP (CEE_CONV_OVF_I8);
10552 case CEE_CONV_OVF_U1:
10553 case CEE_CONV_OVF_U2:
10554 case CEE_CONV_OVF_U4:
10557 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10558 ADD_UNOP (CEE_CONV_OVF_U8);
10565 case CEE_CONV_OVF_I1_UN:
10566 case CEE_CONV_OVF_I2_UN:
10567 case CEE_CONV_OVF_I4_UN:
10568 case CEE_CONV_OVF_I8_UN:
10569 case CEE_CONV_OVF_U1_UN:
10570 case CEE_CONV_OVF_U2_UN:
10571 case CEE_CONV_OVF_U4_UN:
10572 case CEE_CONV_OVF_U8_UN:
10573 case CEE_CONV_OVF_I_UN:
10574 case CEE_CONV_OVF_U_UN:
10581 CHECK_CFG_EXCEPTION;
10585 case CEE_ADD_OVF_UN:
10587 case CEE_MUL_OVF_UN:
10589 case CEE_SUB_OVF_UN:
10595 GSHAREDVT_FAILURE (*ip);
10598 token = read32 (ip + 1);
10599 klass = mini_get_class (method, token, generic_context);
10600 CHECK_TYPELOAD (klass);
10602 if (generic_class_is_reference_type (cfg, klass)) {
10603 MonoInst *store, *load;
10604 int dreg = alloc_ireg_ref (cfg);
10606 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10607 load->flags |= ins_flag;
10608 MONO_ADD_INS (cfg->cbb, load);
10610 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10611 store->flags |= ins_flag;
10612 MONO_ADD_INS (cfg->cbb, store);
10614 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10615 emit_write_barrier (cfg, sp [0], sp [1]);
10617 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10623 int loc_index = -1;
10629 token = read32 (ip + 1);
10630 klass = mini_get_class (method, token, generic_context);
10631 CHECK_TYPELOAD (klass);
10633 /* Optimize the common ldobj+stloc combination */
10636 loc_index = ip [6];
10643 loc_index = ip [5] - CEE_STLOC_0;
10650 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10651 CHECK_LOCAL (loc_index);
10653 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10654 ins->dreg = cfg->locals [loc_index]->dreg;
10655 ins->flags |= ins_flag;
10658 if (ins_flag & MONO_INST_VOLATILE) {
10659 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10660 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10666 /* Optimize the ldobj+stobj combination */
10667 /* The reference case ends up being a load+store anyway */
10668 /* Skip this if the operation is volatile. */
10669 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10674 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10682 ins->flags |= ins_flag;
10685 if (ins_flag & MONO_INST_VOLATILE) {
10686 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10687 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10696 CHECK_STACK_OVF (1);
10698 n = read32 (ip + 1);
10700 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10701 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10702 ins->type = STACK_OBJ;
10705 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10706 MonoInst *iargs [1];
10707 char *str = (char *)mono_method_get_wrapper_data (method, n);
10709 if (cfg->compile_aot)
10710 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10712 EMIT_NEW_PCONST (cfg, iargs [0], str);
10713 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10715 if (cfg->opt & MONO_OPT_SHARED) {
10716 MonoInst *iargs [3];
10718 if (cfg->compile_aot) {
10719 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10721 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10722 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10723 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10724 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10725 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10727 if (cfg->cbb->out_of_line) {
10728 MonoInst *iargs [2];
10730 if (image == mono_defaults.corlib) {
10732 * Avoid relocations in AOT and save some space by using a
10733 * version of helper_ldstr specialized to mscorlib.
10735 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10736 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10738 /* Avoid creating the string object */
10739 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10740 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10741 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10745 if (cfg->compile_aot) {
10746 NEW_LDSTRCONST (cfg, ins, image, n);
10748 MONO_ADD_INS (cfg->cbb, ins);
10751 NEW_PCONST (cfg, ins, NULL);
10752 ins->type = STACK_OBJ;
10753 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10755 OUT_OF_MEMORY_FAILURE;
10758 MONO_ADD_INS (cfg->cbb, ins);
10767 MonoInst *iargs [2];
10768 MonoMethodSignature *fsig;
10771 MonoInst *vtable_arg = NULL;
10774 token = read32 (ip + 1);
10775 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10778 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10781 mono_save_token_info (cfg, image, token, cmethod);
10783 if (!mono_class_init (cmethod->klass))
10784 TYPE_LOAD_ERROR (cmethod->klass);
10786 context_used = mini_method_check_context_used (cfg, cmethod);
10788 if (mono_security_core_clr_enabled ())
10789 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10791 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10792 emit_class_init (cfg, cmethod->klass);
10793 CHECK_TYPELOAD (cmethod->klass);
10797 if (cfg->gsharedvt) {
10798 if (mini_is_gsharedvt_variable_signature (sig))
10799 GSHAREDVT_FAILURE (*ip);
10803 n = fsig->param_count;
10807 * Generate smaller code for the common newobj <exception> instruction in
10808 * argument checking code.
10810 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10811 is_exception_class (cmethod->klass) && n <= 2 &&
10812 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10813 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10814 MonoInst *iargs [3];
10818 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10821 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10824 iargs [1] = sp [0];
10825 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10828 iargs [1] = sp [0];
10829 iargs [2] = sp [1];
10830 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10833 g_assert_not_reached ();
10841 /* move the args to allow room for 'this' in the first position */
10847 /* check_call_signature () requires sp[0] to be set */
10848 this_ins.type = STACK_OBJ;
10849 sp [0] = &this_ins;
10850 if (check_call_signature (cfg, fsig, sp))
10855 if (mini_class_is_system_array (cmethod->klass)) {
10856 *sp = emit_get_rgctx_method (cfg, context_used,
10857 cmethod, MONO_RGCTX_INFO_METHOD);
10859 /* Avoid varargs in the common case */
10860 if (fsig->param_count == 1)
10861 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10862 else if (fsig->param_count == 2)
10863 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10864 else if (fsig->param_count == 3)
10865 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10866 else if (fsig->param_count == 4)
10867 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10869 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10870 } else if (cmethod->string_ctor) {
10871 g_assert (!context_used);
10872 g_assert (!vtable_arg);
10873 /* we simply pass a null pointer */
10874 EMIT_NEW_PCONST (cfg, *sp, NULL);
10875 /* now call the string ctor */
10876 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10878 if (cmethod->klass->valuetype) {
10879 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10880 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10881 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10886 * The code generated by mini_emit_virtual_call () expects
10887 * iargs [0] to be a boxed instance, but luckily the vcall
10888 * will be transformed into a normal call there.
10890 } else if (context_used) {
10891 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10894 MonoVTable *vtable = NULL;
10896 if (!cfg->compile_aot)
10897 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10898 CHECK_TYPELOAD (cmethod->klass);
10901 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10902 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10903 * As a workaround, we call class cctors before allocating objects.
10905 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10906 emit_class_init (cfg, cmethod->klass);
10907 if (cfg->verbose_level > 2)
10908 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10909 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10912 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10915 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10918 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10920 /* Now call the actual ctor */
10921 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10922 CHECK_CFG_EXCEPTION;
10925 if (alloc == NULL) {
10927 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10928 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10936 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10937 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10940 case CEE_CASTCLASS:
10944 token = read32 (ip + 1);
10945 klass = mini_get_class (method, token, generic_context);
10946 CHECK_TYPELOAD (klass);
10947 if (sp [0]->type != STACK_OBJ)
10950 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10951 CHECK_CFG_EXCEPTION;
10960 token = read32 (ip + 1);
10961 klass = mini_get_class (method, token, generic_context);
10962 CHECK_TYPELOAD (klass);
10963 if (sp [0]->type != STACK_OBJ)
10966 context_used = mini_class_check_context_used (cfg, klass);
10968 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10969 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10970 MonoInst *args [3];
10977 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10980 idx = get_castclass_cache_idx (cfg);
10981 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10983 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10986 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10987 MonoMethod *mono_isinst;
10988 MonoInst *iargs [1];
10991 mono_isinst = mono_marshal_get_isinst (klass);
10992 iargs [0] = sp [0];
10994 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10995 iargs, ip, cfg->real_offset, TRUE);
10996 CHECK_CFG_EXCEPTION;
10997 g_assert (costs > 0);
11000 cfg->real_offset += 5;
11004 inline_costs += costs;
11007 ins = handle_isinst (cfg, klass, *sp, context_used);
11008 CHECK_CFG_EXCEPTION;
11014 case CEE_UNBOX_ANY: {
11015 MonoInst *res, *addr;
11020 token = read32 (ip + 1);
11021 klass = mini_get_class (method, token, generic_context);
11022 CHECK_TYPELOAD (klass);
11024 mono_save_token_info (cfg, image, token, klass);
11026 context_used = mini_class_check_context_used (cfg, klass);
11028 if (mini_is_gsharedvt_klass (klass)) {
11029 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11031 } else if (generic_class_is_reference_type (cfg, klass)) {
11032 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11033 CHECK_CFG_EXCEPTION;
11034 } else if (mono_class_is_nullable (klass)) {
11035 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11037 addr = handle_unbox (cfg, klass, sp, context_used);
11039 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11050 MonoClass *enum_class;
11051 MonoMethod *has_flag;
11057 token = read32 (ip + 1);
11058 klass = mini_get_class (method, token, generic_context);
11059 CHECK_TYPELOAD (klass);
11061 mono_save_token_info (cfg, image, token, klass);
11063 context_used = mini_class_check_context_used (cfg, klass);
11065 if (generic_class_is_reference_type (cfg, klass)) {
11071 if (klass == mono_defaults.void_class)
11073 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11075 /* frequent check in generic code: box (struct), brtrue */
11080 * <push int/long ptr>
11083 * constrained. MyFlags
11084 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11086 * If we find this sequence and the operand types on box and constrained
11087 * are equal, we can emit a specialized instruction sequence instead of
11088 * the very slow HasFlag () call.
11090 if ((cfg->opt & MONO_OPT_INTRINS) &&
11091 /* Cheap checks first. */
11092 ip + 5 + 6 + 5 < end &&
11093 ip [5] == CEE_PREFIX1 &&
11094 ip [6] == CEE_CONSTRAINED_ &&
11095 ip [11] == CEE_CALLVIRT &&
11096 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11097 mono_class_is_enum (klass) &&
11098 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11099 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11100 has_flag->klass == mono_defaults.enum_class &&
11101 !strcmp (has_flag->name, "HasFlag") &&
11102 has_flag->signature->hasthis &&
11103 has_flag->signature->param_count == 1) {
11104 CHECK_TYPELOAD (enum_class);
11106 if (enum_class == klass) {
11107 MonoInst *enum_this, *enum_flag;
11112 enum_this = sp [0];
11113 enum_flag = sp [1];
11115 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11120 // FIXME: LLVM can't handle the inconsistent bb linking
11121 if (!mono_class_is_nullable (klass) &&
11122 !mini_is_gsharedvt_klass (klass) &&
11123 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11124 (ip [5] == CEE_BRTRUE ||
11125 ip [5] == CEE_BRTRUE_S ||
11126 ip [5] == CEE_BRFALSE ||
11127 ip [5] == CEE_BRFALSE_S)) {
11128 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11130 MonoBasicBlock *true_bb, *false_bb;
11134 if (cfg->verbose_level > 3) {
11135 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11136 printf ("<box+brtrue opt>\n");
11141 case CEE_BRFALSE_S:
11144 target = ip + 1 + (signed char)(*ip);
11151 target = ip + 4 + (gint)(read32 (ip));
11155 g_assert_not_reached ();
11159 * We need to link both bblocks, since it is needed for handling stack
11160 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11161 * Branching to only one of them would lead to inconsistencies, so
11162 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11164 GET_BBLOCK (cfg, true_bb, target);
11165 GET_BBLOCK (cfg, false_bb, ip);
11167 mono_link_bblock (cfg, cfg->cbb, true_bb);
11168 mono_link_bblock (cfg, cfg->cbb, false_bb);
11170 if (sp != stack_start) {
11171 handle_stack_args (cfg, stack_start, sp - stack_start);
11173 CHECK_UNVERIFIABLE (cfg);
11176 if (COMPILE_LLVM (cfg)) {
11177 dreg = alloc_ireg (cfg);
11178 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11181 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11183 /* The JIT can't eliminate the iconst+compare */
11184 MONO_INST_NEW (cfg, ins, OP_BR);
11185 ins->inst_target_bb = is_true ? true_bb : false_bb;
11186 MONO_ADD_INS (cfg->cbb, ins);
11189 start_new_bblock = 1;
11193 *sp++ = handle_box (cfg, val, klass, context_used);
11195 CHECK_CFG_EXCEPTION;
11204 token = read32 (ip + 1);
11205 klass = mini_get_class (method, token, generic_context);
11206 CHECK_TYPELOAD (klass);
11208 mono_save_token_info (cfg, image, token, klass);
11210 context_used = mini_class_check_context_used (cfg, klass);
11212 if (mono_class_is_nullable (klass)) {
11215 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11216 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11220 ins = handle_unbox (cfg, klass, sp, context_used);
11233 MonoClassField *field;
11234 #ifndef DISABLE_REMOTING
11238 gboolean is_instance;
11240 gpointer addr = NULL;
11241 gboolean is_special_static;
11243 MonoInst *store_val = NULL;
11244 MonoInst *thread_ins;
11247 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11249 if (op == CEE_STFLD) {
11252 store_val = sp [1];
11257 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11259 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11262 if (op == CEE_STSFLD) {
11265 store_val = sp [0];
11270 token = read32 (ip + 1);
11271 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11272 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11273 klass = field->parent;
11276 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11279 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11280 FIELD_ACCESS_FAILURE (method, field);
11281 mono_class_init (klass);
11283 /* if the class is Critical then transparent code cannot access its fields */
11284 if (!is_instance && mono_security_core_clr_enabled ())
11285 ensure_method_is_allowed_to_access_field (cfg, method, field);
11287 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11288 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11289 if (mono_security_core_clr_enabled ())
11290 ensure_method_is_allowed_to_access_field (cfg, method, field);
11293 ftype = mono_field_get_type (field);
11296 * LDFLD etc. is usable on static fields as well, so convert those cases to
11299 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11311 g_assert_not_reached ();
11313 is_instance = FALSE;
11316 context_used = mini_class_check_context_used (cfg, klass);
11318 /* INSTANCE CASE */
11320 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11321 if (op == CEE_STFLD) {
11322 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11324 #ifndef DISABLE_REMOTING
11325 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11326 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11327 MonoInst *iargs [5];
11329 GSHAREDVT_FAILURE (op);
11331 iargs [0] = sp [0];
11332 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11333 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11334 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11336 iargs [4] = sp [1];
11338 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11339 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11340 iargs, ip, cfg->real_offset, TRUE);
11341 CHECK_CFG_EXCEPTION;
11342 g_assert (costs > 0);
11344 cfg->real_offset += 5;
11346 inline_costs += costs;
11348 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11355 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11357 if (mini_is_gsharedvt_klass (klass)) {
11358 MonoInst *offset_ins;
11360 context_used = mini_class_check_context_used (cfg, klass);
11362 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11363 /* The value is offset by 1 */
11364 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11365 dreg = alloc_ireg_mp (cfg);
11366 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11367 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11368 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11370 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11372 if (sp [0]->opcode != OP_LDADDR)
11373 store->flags |= MONO_INST_FAULT;
11375 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11376 /* insert call to write barrier */
11380 dreg = alloc_ireg_mp (cfg);
11381 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11382 emit_write_barrier (cfg, ptr, sp [1]);
11385 store->flags |= ins_flag;
11392 #ifndef DISABLE_REMOTING
11393 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11394 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11395 MonoInst *iargs [4];
11397 GSHAREDVT_FAILURE (op);
11399 iargs [0] = sp [0];
11400 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11401 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11402 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11403 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11404 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11405 iargs, ip, cfg->real_offset, TRUE);
11406 CHECK_CFG_EXCEPTION;
11407 g_assert (costs > 0);
11409 cfg->real_offset += 5;
11413 inline_costs += costs;
11415 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11421 if (sp [0]->type == STACK_VTYPE) {
11424 /* Have to compute the address of the variable */
11426 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11428 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11430 g_assert (var->klass == klass);
11432 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11436 if (op == CEE_LDFLDA) {
11437 if (sp [0]->type == STACK_OBJ) {
11438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11439 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11442 dreg = alloc_ireg_mp (cfg);
11444 if (mini_is_gsharedvt_klass (klass)) {
11445 MonoInst *offset_ins;
11447 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11448 /* The value is offset by 1 */
11449 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11450 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11452 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11454 ins->klass = mono_class_from_mono_type (field->type);
11455 ins->type = STACK_MP;
11460 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11462 if (mini_is_gsharedvt_klass (klass)) {
11463 MonoInst *offset_ins;
11465 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11466 /* The value is offset by 1 */
11467 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11468 dreg = alloc_ireg_mp (cfg);
11469 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11470 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11472 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11474 load->flags |= ins_flag;
11475 if (sp [0]->opcode != OP_LDADDR)
11476 load->flags |= MONO_INST_FAULT;
11488 context_used = mini_class_check_context_used (cfg, klass);
11490 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11493 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11494 * to be called here.
11496 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11497 mono_class_vtable (cfg->domain, klass);
11498 CHECK_TYPELOAD (klass);
11500 mono_domain_lock (cfg->domain);
11501 if (cfg->domain->special_static_fields)
11502 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11503 mono_domain_unlock (cfg->domain);
11505 is_special_static = mono_class_field_is_special_static (field);
11507 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11508 thread_ins = mono_get_thread_intrinsic (cfg);
11512 /* Generate IR to compute the field address */
11513 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11515 * Fast access to TLS data
11516 * Inline version of get_thread_static_data () in
11520 int idx, static_data_reg, array_reg, dreg;
11522 GSHAREDVT_FAILURE (op);
11524 MONO_ADD_INS (cfg->cbb, thread_ins);
11525 static_data_reg = alloc_ireg (cfg);
11526 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11528 if (cfg->compile_aot) {
11529 int offset_reg, offset2_reg, idx_reg;
11531 /* For TLS variables, this will return the TLS offset */
11532 EMIT_NEW_SFLDACONST (cfg, ins, field);
11533 offset_reg = ins->dreg;
11534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11535 idx_reg = alloc_ireg (cfg);
11536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11538 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11539 array_reg = alloc_ireg (cfg);
11540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11541 offset2_reg = alloc_ireg (cfg);
11542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11544 dreg = alloc_ireg (cfg);
11545 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11547 offset = (gsize)addr & 0x7fffffff;
11548 idx = offset & 0x3f;
11550 array_reg = alloc_ireg (cfg);
11551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11552 dreg = alloc_ireg (cfg);
11553 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11555 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11556 (cfg->compile_aot && is_special_static) ||
11557 (context_used && is_special_static)) {
11558 MonoInst *iargs [2];
11560 g_assert (field->parent);
11561 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11562 if (context_used) {
11563 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11564 field, MONO_RGCTX_INFO_CLASS_FIELD);
11566 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11568 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11569 } else if (context_used) {
11570 MonoInst *static_data;
11573 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11574 method->klass->name_space, method->klass->name, method->name,
11575 depth, field->offset);
11578 if (mono_class_needs_cctor_run (klass, method))
11579 emit_class_init (cfg, klass);
11582 * The pointer we're computing here is
11584 * super_info.static_data + field->offset
11586 static_data = emit_get_rgctx_klass (cfg, context_used,
11587 klass, MONO_RGCTX_INFO_STATIC_DATA);
11589 if (mini_is_gsharedvt_klass (klass)) {
11590 MonoInst *offset_ins;
11592 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11593 /* The value is offset by 1 */
11594 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11595 dreg = alloc_ireg_mp (cfg);
11596 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11597 } else if (field->offset == 0) {
11600 int addr_reg = mono_alloc_preg (cfg);
11601 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11603 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11604 MonoInst *iargs [2];
11606 g_assert (field->parent);
11607 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11608 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11609 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11611 MonoVTable *vtable = NULL;
11613 if (!cfg->compile_aot)
11614 vtable = mono_class_vtable (cfg->domain, klass);
11615 CHECK_TYPELOAD (klass);
11618 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11619 if (!(g_slist_find (class_inits, klass))) {
11620 emit_class_init (cfg, klass);
11621 if (cfg->verbose_level > 2)
11622 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11623 class_inits = g_slist_prepend (class_inits, klass);
11626 if (cfg->run_cctors) {
11628 /* This makes so that inline cannot trigger */
11629 /* .cctors: too many apps depend on them */
11630 /* running with a specific order... */
11632 if (! vtable->initialized)
11633 INLINE_FAILURE ("class init");
11634 ex = mono_runtime_class_init_full (vtable, FALSE);
11636 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11637 mono_error_set_exception_instance (&cfg->error, ex);
11638 g_assert_not_reached ();
11639 goto exception_exit;
11643 if (cfg->compile_aot)
11644 EMIT_NEW_SFLDACONST (cfg, ins, field);
11647 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11649 EMIT_NEW_PCONST (cfg, ins, addr);
11652 MonoInst *iargs [1];
11653 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11654 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11658 /* Generate IR to do the actual load/store operation */
11660 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11661 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11662 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11665 if (op == CEE_LDSFLDA) {
11666 ins->klass = mono_class_from_mono_type (ftype);
11667 ins->type = STACK_PTR;
11669 } else if (op == CEE_STSFLD) {
11672 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11673 store->flags |= ins_flag;
11675 gboolean is_const = FALSE;
11676 MonoVTable *vtable = NULL;
11677 gpointer addr = NULL;
11679 if (!context_used) {
11680 vtable = mono_class_vtable (cfg->domain, klass);
11681 CHECK_TYPELOAD (klass);
11683 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11684 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11685 int ro_type = ftype->type;
11687 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11688 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11689 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11692 GSHAREDVT_FAILURE (op);
11694 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11697 case MONO_TYPE_BOOLEAN:
11699 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11703 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11706 case MONO_TYPE_CHAR:
11708 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11712 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11717 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11721 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11726 case MONO_TYPE_PTR:
11727 case MONO_TYPE_FNPTR:
11728 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11729 type_to_eval_stack_type ((cfg), field->type, *sp);
11732 case MONO_TYPE_STRING:
11733 case MONO_TYPE_OBJECT:
11734 case MONO_TYPE_CLASS:
11735 case MONO_TYPE_SZARRAY:
11736 case MONO_TYPE_ARRAY:
11737 if (!mono_gc_is_moving ()) {
11738 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11739 type_to_eval_stack_type ((cfg), field->type, *sp);
11747 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11752 case MONO_TYPE_VALUETYPE:
11762 CHECK_STACK_OVF (1);
11764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11765 load->flags |= ins_flag;
11771 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11772 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11773 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11784 token = read32 (ip + 1);
11785 klass = mini_get_class (method, token, generic_context);
11786 CHECK_TYPELOAD (klass);
11787 if (ins_flag & MONO_INST_VOLATILE) {
11788 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11789 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11791 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11792 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11793 ins->flags |= ins_flag;
11794 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11795 generic_class_is_reference_type (cfg, klass)) {
11796 /* insert call to write barrier */
11797 emit_write_barrier (cfg, sp [0], sp [1]);
11809 const char *data_ptr;
11811 guint32 field_token;
11817 token = read32 (ip + 1);
11819 klass = mini_get_class (method, token, generic_context);
11820 CHECK_TYPELOAD (klass);
11822 context_used = mini_class_check_context_used (cfg, klass);
11824 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11825 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11826 ins->sreg1 = sp [0]->dreg;
11827 ins->type = STACK_I4;
11828 ins->dreg = alloc_ireg (cfg);
11829 MONO_ADD_INS (cfg->cbb, ins);
11830 *sp = mono_decompose_opcode (cfg, ins);
11833 if (context_used) {
11834 MonoInst *args [3];
11835 MonoClass *array_class = mono_array_class_get (klass, 1);
11836 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11838 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11841 args [0] = emit_get_rgctx_klass (cfg, context_used,
11842 array_class, MONO_RGCTX_INFO_VTABLE);
11847 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11849 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11851 if (cfg->opt & MONO_OPT_SHARED) {
11852 /* Decompose now to avoid problems with references to the domainvar */
11853 MonoInst *iargs [3];
11855 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11856 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11857 iargs [2] = sp [0];
11859 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11861 /* Decompose later since it is needed by abcrem */
11862 MonoClass *array_type = mono_array_class_get (klass, 1);
11863 mono_class_vtable (cfg->domain, array_type);
11864 CHECK_TYPELOAD (array_type);
11866 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11867 ins->dreg = alloc_ireg_ref (cfg);
11868 ins->sreg1 = sp [0]->dreg;
11869 ins->inst_newa_class = klass;
11870 ins->type = STACK_OBJ;
11871 ins->klass = array_type;
11872 MONO_ADD_INS (cfg->cbb, ins);
11873 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11874 cfg->cbb->has_array_access = TRUE;
11876 /* Needed so mono_emit_load_get_addr () gets called */
11877 mono_get_got_var (cfg);
11887 * we inline/optimize the initialization sequence if possible.
11888 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11889 * for small sizes open code the memcpy
11890 * ensure the rva field is big enough
11892 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11893 MonoMethod *memcpy_method = get_memcpy_method ();
11894 MonoInst *iargs [3];
11895 int add_reg = alloc_ireg_mp (cfg);
11897 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11898 if (cfg->compile_aot) {
11899 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11901 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11903 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11904 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11913 if (sp [0]->type != STACK_OBJ)
11916 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11917 ins->dreg = alloc_preg (cfg);
11918 ins->sreg1 = sp [0]->dreg;
11919 ins->type = STACK_I4;
11920 /* This flag will be inherited by the decomposition */
11921 ins->flags |= MONO_INST_FAULT;
11922 MONO_ADD_INS (cfg->cbb, ins);
11923 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11924 cfg->cbb->has_array_access = TRUE;
11932 if (sp [0]->type != STACK_OBJ)
11935 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11937 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11938 CHECK_TYPELOAD (klass);
11939 /* we need to make sure that this array is exactly the type it needs
11940 * to be for correctness. the wrappers are lax with their usage
11941 * so we need to ignore them here
11943 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11944 MonoClass *array_class = mono_array_class_get (klass, 1);
11945 mini_emit_check_array_type (cfg, sp [0], array_class);
11946 CHECK_TYPELOAD (array_class);
11950 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11955 case CEE_LDELEM_I1:
11956 case CEE_LDELEM_U1:
11957 case CEE_LDELEM_I2:
11958 case CEE_LDELEM_U2:
11959 case CEE_LDELEM_I4:
11960 case CEE_LDELEM_U4:
11961 case CEE_LDELEM_I8:
11963 case CEE_LDELEM_R4:
11964 case CEE_LDELEM_R8:
11965 case CEE_LDELEM_REF: {
11971 if (*ip == CEE_LDELEM) {
11973 token = read32 (ip + 1);
11974 klass = mini_get_class (method, token, generic_context);
11975 CHECK_TYPELOAD (klass);
11976 mono_class_init (klass);
11979 klass = array_access_to_klass (*ip);
11981 if (sp [0]->type != STACK_OBJ)
11984 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11986 if (mini_is_gsharedvt_variable_klass (klass)) {
11987 // FIXME-VT: OP_ICONST optimization
11988 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11989 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11990 ins->opcode = OP_LOADV_MEMBASE;
11991 } else if (sp [1]->opcode == OP_ICONST) {
11992 int array_reg = sp [0]->dreg;
11993 int index_reg = sp [1]->dreg;
11994 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11996 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11997 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11999 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12000 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12002 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12003 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12006 if (*ip == CEE_LDELEM)
12013 case CEE_STELEM_I1:
12014 case CEE_STELEM_I2:
12015 case CEE_STELEM_I4:
12016 case CEE_STELEM_I8:
12017 case CEE_STELEM_R4:
12018 case CEE_STELEM_R8:
12019 case CEE_STELEM_REF:
12024 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12026 if (*ip == CEE_STELEM) {
12028 token = read32 (ip + 1);
12029 klass = mini_get_class (method, token, generic_context);
12030 CHECK_TYPELOAD (klass);
12031 mono_class_init (klass);
12034 klass = array_access_to_klass (*ip);
12036 if (sp [0]->type != STACK_OBJ)
12039 emit_array_store (cfg, klass, sp, TRUE);
12041 if (*ip == CEE_STELEM)
12048 case CEE_CKFINITE: {
12052 if (cfg->llvm_only) {
12053 MonoInst *iargs [1];
12055 iargs [0] = sp [0];
12056 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12058 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12059 ins->sreg1 = sp [0]->dreg;
12060 ins->dreg = alloc_freg (cfg);
12061 ins->type = STACK_R8;
12062 MONO_ADD_INS (cfg->cbb, ins);
12064 *sp++ = mono_decompose_opcode (cfg, ins);
12070 case CEE_REFANYVAL: {
12071 MonoInst *src_var, *src;
12073 int klass_reg = alloc_preg (cfg);
12074 int dreg = alloc_preg (cfg);
12076 GSHAREDVT_FAILURE (*ip);
12079 MONO_INST_NEW (cfg, ins, *ip);
12082 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12083 CHECK_TYPELOAD (klass);
12085 context_used = mini_class_check_context_used (cfg, klass);
12088 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12090 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12091 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12094 if (context_used) {
12095 MonoInst *klass_ins;
12097 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12098 klass, MONO_RGCTX_INFO_KLASS);
12101 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12102 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12104 mini_emit_class_check (cfg, klass_reg, klass);
12106 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12107 ins->type = STACK_MP;
12108 ins->klass = klass;
12113 case CEE_MKREFANY: {
12114 MonoInst *loc, *addr;
12116 GSHAREDVT_FAILURE (*ip);
12119 MONO_INST_NEW (cfg, ins, *ip);
12122 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12123 CHECK_TYPELOAD (klass);
12125 context_used = mini_class_check_context_used (cfg, klass);
12127 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12128 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12130 if (context_used) {
12131 MonoInst *const_ins;
12132 int type_reg = alloc_preg (cfg);
12134 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12135 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12138 } else if (cfg->compile_aot) {
12139 int const_reg = alloc_preg (cfg);
12140 int type_reg = alloc_preg (cfg);
12142 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12143 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12147 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12148 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12150 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12152 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12153 ins->type = STACK_VTYPE;
12154 ins->klass = mono_defaults.typed_reference_class;
12159 case CEE_LDTOKEN: {
12161 MonoClass *handle_class;
12163 CHECK_STACK_OVF (1);
12166 n = read32 (ip + 1);
12168 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12169 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12170 handle = mono_method_get_wrapper_data (method, n);
12171 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12172 if (handle_class == mono_defaults.typehandle_class)
12173 handle = &((MonoClass*)handle)->byval_arg;
12176 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12181 mono_class_init (handle_class);
12182 if (cfg->gshared) {
12183 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12184 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12185 /* This case handles ldtoken
12186 of an open type, like for
12189 } else if (handle_class == mono_defaults.typehandle_class) {
12190 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12191 } else if (handle_class == mono_defaults.fieldhandle_class)
12192 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12193 else if (handle_class == mono_defaults.methodhandle_class)
12194 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12196 g_assert_not_reached ();
12199 if ((cfg->opt & MONO_OPT_SHARED) &&
12200 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12201 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12202 MonoInst *addr, *vtvar, *iargs [3];
12203 int method_context_used;
12205 method_context_used = mini_method_check_context_used (cfg, method);
12207 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12209 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12210 EMIT_NEW_ICONST (cfg, iargs [1], n);
12211 if (method_context_used) {
12212 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12213 method, MONO_RGCTX_INFO_METHOD);
12214 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12216 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12217 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12219 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12221 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12223 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12225 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12226 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12227 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12228 (cmethod->klass == mono_defaults.systemtype_class) &&
12229 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12230 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12232 mono_class_init (tclass);
12233 if (context_used) {
12234 ins = emit_get_rgctx_klass (cfg, context_used,
12235 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12236 } else if (cfg->compile_aot) {
12237 if (method->wrapper_type) {
12238 mono_error_init (&error); //got to do it since there are multiple conditionals below
12239 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12240 /* Special case for static synchronized wrappers */
12241 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12243 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12244 /* FIXME: n is not a normal token */
12246 EMIT_NEW_PCONST (cfg, ins, NULL);
12249 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12253 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &error);
12254 mono_error_raise_exception (&error); /* FIXME don't raise here */
12256 EMIT_NEW_PCONST (cfg, ins, rt);
12258 ins->type = STACK_OBJ;
12259 ins->klass = cmethod->klass;
12262 MonoInst *addr, *vtvar;
12264 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12266 if (context_used) {
12267 if (handle_class == mono_defaults.typehandle_class) {
12268 ins = emit_get_rgctx_klass (cfg, context_used,
12269 mono_class_from_mono_type ((MonoType *)handle),
12270 MONO_RGCTX_INFO_TYPE);
12271 } else if (handle_class == mono_defaults.methodhandle_class) {
12272 ins = emit_get_rgctx_method (cfg, context_used,
12273 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12274 } else if (handle_class == mono_defaults.fieldhandle_class) {
12275 ins = emit_get_rgctx_field (cfg, context_used,
12276 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12278 g_assert_not_reached ();
12280 } else if (cfg->compile_aot) {
12281 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12283 EMIT_NEW_PCONST (cfg, ins, handle);
12285 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12286 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12287 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12297 MONO_INST_NEW (cfg, ins, OP_THROW);
12299 ins->sreg1 = sp [0]->dreg;
12301 cfg->cbb->out_of_line = TRUE;
12302 MONO_ADD_INS (cfg->cbb, ins);
12303 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12304 MONO_ADD_INS (cfg->cbb, ins);
12307 link_bblock (cfg, cfg->cbb, end_bblock);
12308 start_new_bblock = 1;
12309 /* This can complicate code generation for llvm since the return value might not be defined */
12310 if (COMPILE_LLVM (cfg))
12311 INLINE_FAILURE ("throw");
12313 case CEE_ENDFINALLY:
12314 /* mono_save_seq_point_info () depends on this */
12315 if (sp != stack_start)
12316 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12317 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12318 MONO_ADD_INS (cfg->cbb, ins);
12320 start_new_bblock = 1;
12323 * Control will leave the method so empty the stack, otherwise
12324 * the next basic block will start with a nonempty stack.
12326 while (sp != stack_start) {
12331 case CEE_LEAVE_S: {
12334 if (*ip == CEE_LEAVE) {
12336 target = ip + 5 + (gint32)read32(ip + 1);
12339 target = ip + 2 + (signed char)(ip [1]);
12342 /* empty the stack */
12343 while (sp != stack_start) {
12348 * If this leave statement is in a catch block, check for a
12349 * pending exception, and rethrow it if necessary.
12350 * We avoid doing this in runtime invoke wrappers, since those are called
12351 * by native code which excepts the wrapper to catch all exceptions.
12353 for (i = 0; i < header->num_clauses; ++i) {
12354 MonoExceptionClause *clause = &header->clauses [i];
12357 * Use <= in the final comparison to handle clauses with multiple
12358 * leave statements, like in bug #78024.
12359 * The ordering of the exception clauses guarantees that we find the
12360 * innermost clause.
12362 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12364 MonoBasicBlock *dont_throw;
12369 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12372 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12374 NEW_BBLOCK (cfg, dont_throw);
12377 * Currently, we always rethrow the abort exception, despite the
12378 * fact that this is not correct. See thread6.cs for an example.
12379 * But propagating the abort exception is more important than
12380 * getting the sematics right.
12382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12384 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12386 MONO_START_BB (cfg, dont_throw);
12391 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12394 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12396 MonoExceptionClause *clause;
12398 for (tmp = handlers; tmp; tmp = tmp->next) {
12399 clause = (MonoExceptionClause *)tmp->data;
12400 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12402 link_bblock (cfg, cfg->cbb, tblock);
12403 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12404 ins->inst_target_bb = tblock;
12405 ins->inst_eh_block = clause;
12406 MONO_ADD_INS (cfg->cbb, ins);
12407 cfg->cbb->has_call_handler = 1;
12408 if (COMPILE_LLVM (cfg)) {
12409 MonoBasicBlock *target_bb;
12412 * Link the finally bblock with the target, since it will
12413 * conceptually branch there.
12415 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12416 GET_BBLOCK (cfg, target_bb, target);
12417 link_bblock (cfg, tblock, target_bb);
12420 g_list_free (handlers);
12423 MONO_INST_NEW (cfg, ins, OP_BR);
12424 MONO_ADD_INS (cfg->cbb, ins);
12425 GET_BBLOCK (cfg, tblock, target);
12426 link_bblock (cfg, cfg->cbb, tblock);
12427 ins->inst_target_bb = tblock;
12429 start_new_bblock = 1;
12431 if (*ip == CEE_LEAVE)
12440 * Mono specific opcodes
12442 case MONO_CUSTOM_PREFIX: {
12444 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12448 case CEE_MONO_ICALL: {
12450 MonoJitICallInfo *info;
12452 token = read32 (ip + 2);
12453 func = mono_method_get_wrapper_data (method, token);
12454 info = mono_find_jit_icall_by_addr (func);
12456 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12459 CHECK_STACK (info->sig->param_count);
12460 sp -= info->sig->param_count;
12462 ins = mono_emit_jit_icall (cfg, info->func, sp);
12463 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12467 inline_costs += 10 * num_calls++;
12471 case CEE_MONO_LDPTR_CARD_TABLE:
12472 case CEE_MONO_LDPTR_NURSERY_START:
12473 case CEE_MONO_LDPTR_NURSERY_BITS:
12474 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12475 CHECK_STACK_OVF (1);
12478 case CEE_MONO_LDPTR_CARD_TABLE:
12479 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12481 case CEE_MONO_LDPTR_NURSERY_START:
12482 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12484 case CEE_MONO_LDPTR_NURSERY_BITS:
12485 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12487 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12488 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12494 inline_costs += 10 * num_calls++;
12497 case CEE_MONO_LDPTR: {
12500 CHECK_STACK_OVF (1);
12502 token = read32 (ip + 2);
12504 ptr = mono_method_get_wrapper_data (method, token);
12505 EMIT_NEW_PCONST (cfg, ins, ptr);
12508 inline_costs += 10 * num_calls++;
12509 /* Can't embed random pointers into AOT code */
12513 case CEE_MONO_JIT_ICALL_ADDR: {
12514 MonoJitICallInfo *callinfo;
12517 CHECK_STACK_OVF (1);
12519 token = read32 (ip + 2);
12521 ptr = mono_method_get_wrapper_data (method, token);
12522 callinfo = mono_find_jit_icall_by_addr (ptr);
12523 g_assert (callinfo);
12524 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12527 inline_costs += 10 * num_calls++;
12530 case CEE_MONO_ICALL_ADDR: {
12531 MonoMethod *cmethod;
12534 CHECK_STACK_OVF (1);
12536 token = read32 (ip + 2);
12538 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12540 if (cfg->compile_aot) {
12541 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12543 ptr = mono_lookup_internal_call (cmethod);
12545 EMIT_NEW_PCONST (cfg, ins, ptr);
12551 case CEE_MONO_VTADDR: {
12552 MonoInst *src_var, *src;
12558 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12559 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12564 case CEE_MONO_NEWOBJ: {
12565 MonoInst *iargs [2];
12567 CHECK_STACK_OVF (1);
12569 token = read32 (ip + 2);
12570 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12571 mono_class_init (klass);
12572 NEW_DOMAINCONST (cfg, iargs [0]);
12573 MONO_ADD_INS (cfg->cbb, iargs [0]);
12574 NEW_CLASSCONST (cfg, iargs [1], klass);
12575 MONO_ADD_INS (cfg->cbb, iargs [1]);
12576 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12578 inline_costs += 10 * num_calls++;
12581 case CEE_MONO_OBJADDR:
12584 MONO_INST_NEW (cfg, ins, OP_MOVE);
12585 ins->dreg = alloc_ireg_mp (cfg);
12586 ins->sreg1 = sp [0]->dreg;
12587 ins->type = STACK_MP;
12588 MONO_ADD_INS (cfg->cbb, ins);
12592 case CEE_MONO_LDNATIVEOBJ:
12594 * Similar to LDOBJ, but instead load the unmanaged
12595 * representation of the vtype to the stack.
12600 token = read32 (ip + 2);
12601 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12602 g_assert (klass->valuetype);
12603 mono_class_init (klass);
12606 MonoInst *src, *dest, *temp;
12609 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12610 temp->backend.is_pinvoke = 1;
12611 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12612 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12614 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12615 dest->type = STACK_VTYPE;
12616 dest->klass = klass;
12622 case CEE_MONO_RETOBJ: {
12624 * Same as RET, but return the native representation of a vtype
12627 g_assert (cfg->ret);
12628 g_assert (mono_method_signature (method)->pinvoke);
12633 token = read32 (ip + 2);
12634 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12636 if (!cfg->vret_addr) {
12637 g_assert (cfg->ret_var_is_local);
12639 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12641 EMIT_NEW_RETLOADA (cfg, ins);
12643 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12645 if (sp != stack_start)
12648 MONO_INST_NEW (cfg, ins, OP_BR);
12649 ins->inst_target_bb = end_bblock;
12650 MONO_ADD_INS (cfg->cbb, ins);
12651 link_bblock (cfg, cfg->cbb, end_bblock);
12652 start_new_bblock = 1;
12656 case CEE_MONO_CISINST:
12657 case CEE_MONO_CCASTCLASS: {
12662 token = read32 (ip + 2);
12663 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12664 if (ip [1] == CEE_MONO_CISINST)
12665 ins = handle_cisinst (cfg, klass, sp [0]);
12667 ins = handle_ccastclass (cfg, klass, sp [0]);
12672 case CEE_MONO_SAVE_LMF:
12673 case CEE_MONO_RESTORE_LMF:
12676 case CEE_MONO_CLASSCONST:
12677 CHECK_STACK_OVF (1);
12679 token = read32 (ip + 2);
12680 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12683 inline_costs += 10 * num_calls++;
12685 case CEE_MONO_NOT_TAKEN:
12686 cfg->cbb->out_of_line = TRUE;
12689 case CEE_MONO_TLS: {
12692 CHECK_STACK_OVF (1);
12694 key = (MonoTlsKey)read32 (ip + 2);
12695 g_assert (key < TLS_KEY_NUM);
12697 ins = mono_create_tls_get (cfg, key);
12699 if (cfg->compile_aot) {
12701 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12702 ins->dreg = alloc_preg (cfg);
12703 ins->type = STACK_PTR;
12705 g_assert_not_reached ();
12708 ins->type = STACK_PTR;
12709 MONO_ADD_INS (cfg->cbb, ins);
12714 case CEE_MONO_DYN_CALL: {
12715 MonoCallInst *call;
12717 /* It would be easier to call a trampoline, but that would put an
12718 * extra frame on the stack, confusing exception handling. So
12719 * implement it inline using an opcode for now.
12722 if (!cfg->dyn_call_var) {
12723 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12724 /* prevent it from being register allocated */
12725 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12728 /* Has to use a call inst since it local regalloc expects it */
12729 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12730 ins = (MonoInst*)call;
12732 ins->sreg1 = sp [0]->dreg;
12733 ins->sreg2 = sp [1]->dreg;
12734 MONO_ADD_INS (cfg->cbb, ins);
12736 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12739 inline_costs += 10 * num_calls++;
12743 case CEE_MONO_MEMORY_BARRIER: {
12745 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12749 case CEE_MONO_JIT_ATTACH: {
12750 MonoInst *args [16], *domain_ins;
12751 MonoInst *ad_ins, *jit_tls_ins;
12752 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12754 cfg->attach_cookie = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12755 cfg->attach_dummy = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12757 if (mono_threads_is_coop_enabled ()) {
12758 /* AOT code is only used in the root domain */
12759 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12760 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12761 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12762 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12764 EMIT_NEW_PCONST (cfg, ins, NULL);
12765 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12767 ad_ins = mono_get_domain_intrinsic (cfg);
12768 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12770 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12771 NEW_BBLOCK (cfg, next_bb);
12772 NEW_BBLOCK (cfg, call_bb);
12774 if (cfg->compile_aot) {
12775 /* AOT code is only used in the root domain */
12776 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12778 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12780 MONO_ADD_INS (cfg->cbb, ad_ins);
12781 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12782 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12784 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12786 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12789 MONO_START_BB (cfg, call_bb);
12792 /* AOT code is only used in the root domain */
12793 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12794 EMIT_NEW_PCONST (cfg, args [1], NULL);
12795 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12796 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12799 MONO_START_BB (cfg, next_bb);
12805 case CEE_MONO_JIT_DETACH: {
12806 MonoInst *args [16];
12808 /* Restore the original domain */
12809 dreg = alloc_ireg (cfg);
12810 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->attach_cookie->dreg);
12811 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12812 mono_emit_jit_icall (cfg, mono_jit_thread_detach, args);
12816 case CEE_MONO_CALLI_EXTRA_ARG: {
12818 MonoMethodSignature *fsig;
12822 * This is the same as CEE_CALLI, but passes an additional argument
12823 * to the called method in llvmonly mode.
12824 * This is only used by delegate invoke wrappers to call the
12825 * actual delegate method.
12827 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12830 token = read32 (ip + 2);
12838 fsig = mini_get_signature (method, token, generic_context);
12840 if (cfg->llvm_only)
12841 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12843 n = fsig->param_count + fsig->hasthis + 1;
12850 if (cfg->llvm_only) {
12852 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12853 * cconv. This is set by mono_init_delegate ().
12855 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12856 MonoInst *callee = addr;
12857 MonoInst *call, *localloc_ins;
12858 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12859 int low_bit_reg = alloc_preg (cfg);
12861 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12862 NEW_BBLOCK (cfg, end_bb);
12864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12868 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12869 addr = emit_get_rgctx_sig (cfg, context_used,
12870 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12872 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12874 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12875 ins->dreg = alloc_preg (cfg);
12876 ins->inst_imm = 2 * SIZEOF_VOID_P;
12877 MONO_ADD_INS (cfg->cbb, ins);
12878 localloc_ins = ins;
12879 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12883 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12884 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12886 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12887 MONO_START_BB (cfg, is_gsharedvt_bb);
12888 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12889 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12890 ins->dreg = call->dreg;
12892 MONO_START_BB (cfg, end_bb);
12894 /* Caller uses a normal calling conv */
12896 MonoInst *callee = addr;
12897 MonoInst *call, *localloc_ins;
12898 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12899 int low_bit_reg = alloc_preg (cfg);
12901 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12902 NEW_BBLOCK (cfg, end_bb);
12904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12908 /* Normal case: callee uses a normal cconv, no conversion is needed */
12909 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12911 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12912 MONO_START_BB (cfg, is_gsharedvt_bb);
12913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12914 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12915 MONO_ADD_INS (cfg->cbb, addr);
12917 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12919 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12920 ins->dreg = alloc_preg (cfg);
12921 ins->inst_imm = 2 * SIZEOF_VOID_P;
12922 MONO_ADD_INS (cfg->cbb, ins);
12923 localloc_ins = ins;
12924 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12926 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12928 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12929 ins->dreg = call->dreg;
12930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12932 MONO_START_BB (cfg, end_bb);
12935 /* Same as CEE_CALLI */
12936 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12938 * We pass the address to the gsharedvt trampoline in the rgctx reg
12940 MonoInst *callee = addr;
12942 addr = emit_get_rgctx_sig (cfg, context_used,
12943 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12944 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12946 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12950 if (!MONO_TYPE_IS_VOID (fsig->ret))
12951 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12953 CHECK_CFG_EXCEPTION;
12957 constrained_class = NULL;
12961 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12967 case CEE_PREFIX1: {
12970 case CEE_ARGLIST: {
12971 /* somewhat similar to LDTOKEN */
12972 MonoInst *addr, *vtvar;
12973 CHECK_STACK_OVF (1);
12974 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12976 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12977 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12979 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12980 ins->type = STACK_VTYPE;
12981 ins->klass = mono_defaults.argumenthandle_class;
12991 MonoInst *cmp, *arg1, *arg2;
12999 * The following transforms:
13000 * CEE_CEQ into OP_CEQ
13001 * CEE_CGT into OP_CGT
13002 * CEE_CGT_UN into OP_CGT_UN
13003 * CEE_CLT into OP_CLT
13004 * CEE_CLT_UN into OP_CLT_UN
13006 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13008 MONO_INST_NEW (cfg, ins, cmp->opcode);
13009 cmp->sreg1 = arg1->dreg;
13010 cmp->sreg2 = arg2->dreg;
13011 type_from_op (cfg, cmp, arg1, arg2);
13013 add_widen_op (cfg, cmp, &arg1, &arg2);
13014 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13015 cmp->opcode = OP_LCOMPARE;
13016 else if (arg1->type == STACK_R4)
13017 cmp->opcode = OP_RCOMPARE;
13018 else if (arg1->type == STACK_R8)
13019 cmp->opcode = OP_FCOMPARE;
13021 cmp->opcode = OP_ICOMPARE;
13022 MONO_ADD_INS (cfg->cbb, cmp);
13023 ins->type = STACK_I4;
13024 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13025 type_from_op (cfg, ins, arg1, arg2);
13027 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13029 * The backends expect the fceq opcodes to do the
13032 ins->sreg1 = cmp->sreg1;
13033 ins->sreg2 = cmp->sreg2;
13036 MONO_ADD_INS (cfg->cbb, ins);
13042 MonoInst *argconst;
13043 MonoMethod *cil_method;
13045 CHECK_STACK_OVF (1);
13047 n = read32 (ip + 2);
13048 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13051 mono_class_init (cmethod->klass);
13053 mono_save_token_info (cfg, image, n, cmethod);
13055 context_used = mini_method_check_context_used (cfg, cmethod);
13057 cil_method = cmethod;
13058 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13059 METHOD_ACCESS_FAILURE (method, cil_method);
13061 if (mono_security_core_clr_enabled ())
13062 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13065 * Optimize the common case of ldftn+delegate creation
13067 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13068 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13069 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13070 MonoInst *target_ins, *handle_ins;
13071 MonoMethod *invoke;
13072 int invoke_context_used;
13074 invoke = mono_get_delegate_invoke (ctor_method->klass);
13075 if (!invoke || !mono_method_signature (invoke))
13078 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13080 target_ins = sp [-1];
13082 if (mono_security_core_clr_enabled ())
13083 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13085 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13086 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13087 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13089 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13093 /* FIXME: SGEN support */
13094 if (invoke_context_used == 0 || cfg->llvm_only) {
13096 if (cfg->verbose_level > 3)
13097 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13098 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13101 CHECK_CFG_EXCEPTION;
13111 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13112 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13116 inline_costs += 10 * num_calls++;
13119 case CEE_LDVIRTFTN: {
13120 MonoInst *args [2];
13124 n = read32 (ip + 2);
13125 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13128 mono_class_init (cmethod->klass);
13130 context_used = mini_method_check_context_used (cfg, cmethod);
13132 if (mono_security_core_clr_enabled ())
13133 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13136 * Optimize the common case of ldvirtftn+delegate creation
13138 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13139 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13140 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13141 MonoInst *target_ins, *handle_ins;
13142 MonoMethod *invoke;
13143 int invoke_context_used;
13144 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13146 invoke = mono_get_delegate_invoke (ctor_method->klass);
13147 if (!invoke || !mono_method_signature (invoke))
13150 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13152 target_ins = sp [-1];
13154 if (mono_security_core_clr_enabled ())
13155 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13157 /* FIXME: SGEN support */
13158 if (invoke_context_used == 0 || cfg->llvm_only) {
13160 if (cfg->verbose_level > 3)
13161 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13162 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13165 CHECK_CFG_EXCEPTION;
13178 args [1] = emit_get_rgctx_method (cfg, context_used,
13179 cmethod, MONO_RGCTX_INFO_METHOD);
13182 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13184 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13187 inline_costs += 10 * num_calls++;
13191 CHECK_STACK_OVF (1);
13193 n = read16 (ip + 2);
13195 EMIT_NEW_ARGLOAD (cfg, ins, n);
13200 CHECK_STACK_OVF (1);
13202 n = read16 (ip + 2);
13204 NEW_ARGLOADA (cfg, ins, n);
13205 MONO_ADD_INS (cfg->cbb, ins);
13213 n = read16 (ip + 2);
13215 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13217 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13221 CHECK_STACK_OVF (1);
13223 n = read16 (ip + 2);
13225 EMIT_NEW_LOCLOAD (cfg, ins, n);
13230 unsigned char *tmp_ip;
13231 CHECK_STACK_OVF (1);
13233 n = read16 (ip + 2);
13236 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13242 EMIT_NEW_LOCLOADA (cfg, ins, n);
13251 n = read16 (ip + 2);
13253 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13255 emit_stloc_ir (cfg, sp, header, n);
13262 if (sp != stack_start)
13264 if (cfg->method != method)
13266 * Inlining this into a loop in a parent could lead to
13267 * stack overflows which is different behavior than the
13268 * non-inlined case, thus disable inlining in this case.
13270 INLINE_FAILURE("localloc");
13272 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13273 ins->dreg = alloc_preg (cfg);
13274 ins->sreg1 = sp [0]->dreg;
13275 ins->type = STACK_PTR;
13276 MONO_ADD_INS (cfg->cbb, ins);
13278 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13280 ins->flags |= MONO_INST_INIT;
13285 case CEE_ENDFILTER: {
13286 MonoExceptionClause *clause, *nearest;
13291 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13293 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13294 ins->sreg1 = (*sp)->dreg;
13295 MONO_ADD_INS (cfg->cbb, ins);
13296 start_new_bblock = 1;
13300 for (cc = 0; cc < header->num_clauses; ++cc) {
13301 clause = &header->clauses [cc];
13302 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13303 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13304 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13307 g_assert (nearest);
13308 if ((ip - header->code) != nearest->handler_offset)
13313 case CEE_UNALIGNED_:
13314 ins_flag |= MONO_INST_UNALIGNED;
13315 /* FIXME: record alignment? we can assume 1 for now */
13319 case CEE_VOLATILE_:
13320 ins_flag |= MONO_INST_VOLATILE;
13324 ins_flag |= MONO_INST_TAILCALL;
13325 cfg->flags |= MONO_CFG_HAS_TAIL;
13326 /* Can't inline tail calls at this time */
13327 inline_costs += 100000;
13334 token = read32 (ip + 2);
13335 klass = mini_get_class (method, token, generic_context);
13336 CHECK_TYPELOAD (klass);
13337 if (generic_class_is_reference_type (cfg, klass))
13338 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13340 mini_emit_initobj (cfg, *sp, NULL, klass);
13344 case CEE_CONSTRAINED_:
13346 token = read32 (ip + 2);
13347 constrained_class = mini_get_class (method, token, generic_context);
13348 CHECK_TYPELOAD (constrained_class);
13352 case CEE_INITBLK: {
13353 MonoInst *iargs [3];
13357 /* Skip optimized paths for volatile operations. */
13358 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13359 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13360 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13361 /* emit_memset only works when val == 0 */
13362 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13365 iargs [0] = sp [0];
13366 iargs [1] = sp [1];
13367 iargs [2] = sp [2];
13368 if (ip [1] == CEE_CPBLK) {
13370 * FIXME: It's unclear whether we should be emitting both the acquire
13371 * and release barriers for cpblk. It is technically both a load and
13372 * store operation, so it seems like that's the sensible thing to do.
13374 * FIXME: We emit full barriers on both sides of the operation for
13375 * simplicity. We should have a separate atomic memcpy method instead.
13377 MonoMethod *memcpy_method = get_memcpy_method ();
13379 if (ins_flag & MONO_INST_VOLATILE)
13380 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13382 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13383 call->flags |= ins_flag;
13385 if (ins_flag & MONO_INST_VOLATILE)
13386 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13388 MonoMethod *memset_method = get_memset_method ();
13389 if (ins_flag & MONO_INST_VOLATILE) {
13390 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13391 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13393 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13394 call->flags |= ins_flag;
13405 ins_flag |= MONO_INST_NOTYPECHECK;
13407 ins_flag |= MONO_INST_NORANGECHECK;
13408 /* we ignore the no-nullcheck for now since we
13409 * really do it explicitly only when doing callvirt->call
13413 case CEE_RETHROW: {
13415 int handler_offset = -1;
13417 for (i = 0; i < header->num_clauses; ++i) {
13418 MonoExceptionClause *clause = &header->clauses [i];
13419 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13420 handler_offset = clause->handler_offset;
13425 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13427 if (handler_offset == -1)
13430 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13431 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13432 ins->sreg1 = load->dreg;
13433 MONO_ADD_INS (cfg->cbb, ins);
13435 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13436 MONO_ADD_INS (cfg->cbb, ins);
13439 link_bblock (cfg, cfg->cbb, end_bblock);
13440 start_new_bblock = 1;
13448 CHECK_STACK_OVF (1);
13450 token = read32 (ip + 2);
13451 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13452 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13455 val = mono_type_size (type, &ialign);
13457 MonoClass *klass = mini_get_class (method, token, generic_context);
13458 CHECK_TYPELOAD (klass);
13460 val = mono_type_size (&klass->byval_arg, &ialign);
13462 if (mini_is_gsharedvt_klass (klass))
13463 GSHAREDVT_FAILURE (*ip);
13465 EMIT_NEW_ICONST (cfg, ins, val);
13470 case CEE_REFANYTYPE: {
13471 MonoInst *src_var, *src;
13473 GSHAREDVT_FAILURE (*ip);
13479 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13481 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13482 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13483 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13488 case CEE_READONLY_:
13501 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13511 g_warning ("opcode 0x%02x not handled", *ip);
13515 if (start_new_bblock != 1)
13518 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13519 if (cfg->cbb->next_bb) {
13520 /* This could already be set because of inlining, #693905 */
13521 MonoBasicBlock *bb = cfg->cbb;
13523 while (bb->next_bb)
13525 bb->next_bb = end_bblock;
13527 cfg->cbb->next_bb = end_bblock;
13530 if (cfg->method == method && cfg->domainvar) {
13532 MonoInst *get_domain;
13534 cfg->cbb = init_localsbb;
13536 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13537 MONO_ADD_INS (cfg->cbb, get_domain);
13539 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13541 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13542 MONO_ADD_INS (cfg->cbb, store);
13545 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13546 if (cfg->compile_aot)
13547 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13548 mono_get_got_var (cfg);
13551 if (cfg->method == method && cfg->got_var)
13552 mono_emit_load_got_addr (cfg);
13554 if (init_localsbb) {
13555 cfg->cbb = init_localsbb;
13557 for (i = 0; i < header->num_locals; ++i) {
13558 emit_init_local (cfg, i, header->locals [i], init_locals);
13562 if (cfg->init_ref_vars && cfg->method == method) {
13563 /* Emit initialization for ref vars */
13564 // FIXME: Avoid duplication initialization for IL locals.
13565 for (i = 0; i < cfg->num_varinfo; ++i) {
13566 MonoInst *ins = cfg->varinfo [i];
13568 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13569 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13573 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13574 cfg->cbb = init_localsbb;
13575 emit_push_lmf (cfg);
13578 cfg->cbb = init_localsbb;
13579 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13582 MonoBasicBlock *bb;
13585 * Make seq points at backward branch targets interruptable.
13587 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13588 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13589 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13592 /* Add a sequence point for method entry/exit events */
13593 if (seq_points && cfg->gen_sdb_seq_points) {
13594 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13595 MONO_ADD_INS (init_localsbb, ins);
13596 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13597 MONO_ADD_INS (cfg->bb_exit, ins);
13601 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13602 * the code they refer to was dead (#11880).
13604 if (sym_seq_points) {
13605 for (i = 0; i < header->code_size; ++i) {
13606 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13609 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13610 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13617 if (cfg->method == method) {
13618 MonoBasicBlock *bb;
13619 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13620 bb->region = mono_find_block_region (cfg, bb->real_offset);
13622 mono_create_spvar_for_region (cfg, bb->region);
13623 if (cfg->verbose_level > 2)
13624 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13628 if (inline_costs < 0) {
13631 /* Method is too large */
13632 mname = mono_method_full_name (method, TRUE);
13633 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13637 if ((cfg->verbose_level > 2) && (cfg->method == method))
13638 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13643 g_assert (!mono_error_ok (&cfg->error));
13647 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13651 set_exception_type_from_invalid_il (cfg, method, ip);
13655 g_slist_free (class_inits);
13656 mono_basic_block_free (original_bb);
13657 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13658 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13659 if (cfg->exception_type)
13662 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart so a
 * register store can be folded into an immediate store.
 * Asserts for opcodes that have no immediate form.
 * NOTE(review): this listing elides lines (gaps in the embedded numbering);
 * the switch header and default label are not visible here.
 */
13666 store_membase_reg_to_store_membase_imm (int opcode)
13669 case OP_STORE_MEMBASE_REG:
13670 return OP_STORE_MEMBASE_IMM;
13671 case OP_STOREI1_MEMBASE_REG:
13672 return OP_STOREI1_MEMBASE_IMM;
13673 case OP_STOREI2_MEMBASE_REG:
13674 return OP_STOREI2_MEMBASE_IMM;
13675 case OP_STOREI4_MEMBASE_REG:
13676 return OP_STOREI4_MEMBASE_IMM;
13677 case OP_STOREI8_MEMBASE_REG:
13678 return OP_STOREI8_MEMBASE_IMM;
/* unreachable for any opcode handled above; fires for unsupported stores */
13680 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE (e.g. an add taking a register
 * operand becomes OP_IADD_IMM) so a constant operand can be encoded directly
 * into the instruction.  Covers 32-bit and 64-bit ALU ops, compares, membase
 * stores, and some x86/amd64-specific opcodes.
 * NOTE(review): many `case OP_...:` labels are elided from this listing (gaps
 * in the embedded numbering) — each `return` below pairs with an unseen label.
 */
13687 mono_op_to_op_imm (int opcode)
13691 return OP_IADD_IMM;
13693 return OP_ISUB_IMM;
13695 return OP_IDIV_IMM;
13697 return OP_IDIV_UN_IMM;
13699 return OP_IREM_IMM;
13701 return OP_IREM_UN_IMM;
13703 return OP_IMUL_IMM;
13705 return OP_IAND_IMM;
13709 return OP_IXOR_IMM;
13711 return OP_ISHL_IMM;
13713 return OP_ISHR_IMM;
13715 return OP_ISHR_UN_IMM;
13718 return OP_LADD_IMM;
13720 return OP_LSUB_IMM;
13722 return OP_LAND_IMM;
13726 return OP_LXOR_IMM;
13728 return OP_LSHL_IMM;
13730 return OP_LSHR_IMM;
13732 return OP_LSHR_UN_IMM;
/* 64-bit remainder only has an immediate form on 64-bit registers */
13733 #if SIZEOF_REGISTER == 8
13735 return OP_LREM_IMM;
13739 return OP_COMPARE_IMM;
13741 return OP_ICOMPARE_IMM;
13743 return OP_LCOMPARE_IMM;
13745 case OP_STORE_MEMBASE_REG:
13746 return OP_STORE_MEMBASE_IMM;
13747 case OP_STOREI1_MEMBASE_REG:
13748 return OP_STOREI1_MEMBASE_IMM;
13749 case OP_STOREI2_MEMBASE_REG:
13750 return OP_STOREI2_MEMBASE_IMM;
13751 case OP_STOREI4_MEMBASE_REG:
13752 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 allow immediates directly in push/compare encodings */
13754 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13756 return OP_X86_PUSH_IMM;
13757 case OP_X86_COMPARE_MEMBASE_REG:
13758 return OP_X86_COMPARE_MEMBASE_IMM;
13760 #if defined(TARGET_AMD64)
13761 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13762 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13764 case OP_VOIDCALL_REG:
13765 return OP_VOIDCALL;
13773 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the corresponding JIT OP_LOAD*_MEMBASE opcode.
 * LDIND_I (native int) and LDIND_REF both map to the pointer-sized
 * OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 * NOTE(review): most `case CEE_LDIND_*:` labels are elided from this listing.
 */
13780 ldind_to_load_membase (int opcode)
13784 return OP_LOADI1_MEMBASE;
13786 return OP_LOADU1_MEMBASE;
13788 return OP_LOADI2_MEMBASE;
13790 return OP_LOADU2_MEMBASE;
13792 return OP_LOADI4_MEMBASE;
13794 return OP_LOADU4_MEMBASE;
13796 return OP_LOAD_MEMBASE;
13797 case CEE_LDIND_REF:
13798 return OP_LOAD_MEMBASE;
13800 return OP_LOADI8_MEMBASE;
13802 return OP_LOADR4_MEMBASE;
13804 return OP_LOADR8_MEMBASE;
13806 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the corresponding JIT OP_STORE*_MEMBASE_REG
 * opcode.  STIND_I (native int) and STIND_REF both map to the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on unknown opcodes.
 * NOTE(review): most `case CEE_STIND_*:` labels are elided from this listing.
 */
13813 stind_to_store_membase (int opcode)
13817 return OP_STOREI1_MEMBASE_REG;
13819 return OP_STOREI2_MEMBASE_REG;
13821 return OP_STOREI4_MEMBASE_REG;
13823 case CEE_STIND_REF:
13824 return OP_STORE_MEMBASE_REG;
13826 return OP_STOREI8_MEMBASE_REG;
13828 return OP_STORER4_MEMBASE_REG;
13830 return OP_STORER8_MEMBASE_REG;
13832 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert an OP_LOAD*_MEMBASE opcode to its absolute-address OP_LOAD*_MEM
 * form.  Only available on x86/amd64, which can encode absolute addresses;
 * other targets fall through (return path not visible in this elided listing).
 * The I8 variant exists only when registers are 64-bit.
 */
13839 mono_load_membase_to_load_mem (int opcode)
13841 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13842 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13844 case OP_LOAD_MEMBASE:
13845 return OP_LOAD_MEM;
13846 case OP_LOADU1_MEMBASE:
13847 return OP_LOADU1_MEM;
13848 case OP_LOADU2_MEMBASE:
13849 return OP_LOADU2_MEM;
13850 case OP_LOADI4_MEMBASE:
13851 return OP_LOADI4_MEM;
13852 case OP_LOADU4_MEMBASE:
13853 return OP_LOADU4_MEM;
13854 #if SIZEOF_REGISTER == 8
13855 case OP_LOADI8_MEMBASE:
13856 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   If OPCODE (an ALU op) whose result is stored via STORE_OPCODE can be
 * replaced by a read-modify-write *_MEMBASE instruction on this target,
 * return that opcode; the caller can then fuse the load+op+store sequence.
 * Only x86/amd64 have such encodings; on amd64 the 64-bit store variants map
 * to AMD64_*_MEMBASE opcodes while 32-bit stores reuse the X86_* ones.
 * NOTE(review): the `case OP_...:` labels and the `return -1` fallthrough
 * paths are elided from this listing — each `return` pairs with unseen labels.
 */
13865 op_to_op_dest_membase (int store_opcode, int opcode)
13867 #if defined(TARGET_X86)
/* only full-word stores can be fused on x86 */
13868 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13873 return OP_X86_ADD_MEMBASE_REG;
13875 return OP_X86_SUB_MEMBASE_REG;
13877 return OP_X86_AND_MEMBASE_REG;
13879 return OP_X86_OR_MEMBASE_REG;
13881 return OP_X86_XOR_MEMBASE_REG;
13884 return OP_X86_ADD_MEMBASE_IMM;
13887 return OP_X86_SUB_MEMBASE_IMM;
13890 return OP_X86_AND_MEMBASE_IMM;
13893 return OP_X86_OR_MEMBASE_IMM;
13896 return OP_X86_XOR_MEMBASE_IMM;
13902 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 64-bit stores */
13903 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13908 return OP_X86_ADD_MEMBASE_REG;
13910 return OP_X86_SUB_MEMBASE_REG;
13912 return OP_X86_AND_MEMBASE_REG;
13914 return OP_X86_OR_MEMBASE_REG;
13916 return OP_X86_XOR_MEMBASE_REG;
13918 return OP_X86_ADD_MEMBASE_IMM;
13920 return OP_X86_SUB_MEMBASE_IMM;
13922 return OP_X86_AND_MEMBASE_IMM;
13924 return OP_X86_OR_MEMBASE_IMM;
13926 return OP_X86_XOR_MEMBASE_IMM;
13928 return OP_AMD64_ADD_MEMBASE_REG;
13930 return OP_AMD64_SUB_MEMBASE_REG;
13932 return OP_AMD64_AND_MEMBASE_REG;
13934 return OP_AMD64_OR_MEMBASE_REG;
13936 return OP_AMD64_XOR_MEMBASE_REG;
13939 return OP_AMD64_ADD_MEMBASE_IMM;
13942 return OP_AMD64_SUB_MEMBASE_IMM;
13945 return OP_AMD64_AND_MEMBASE_IMM;
13948 return OP_AMD64_OR_MEMBASE_IMM;
13951 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   If the result of OPCODE (visible cases appear to be the x86 set-condition
 * family, judging by the SETEQ/SETNE targets) being stored through
 * STORE_OPCODE can be fused into a single *_MEMBASE instruction, return that
 * opcode.  Only byte stores (STOREI1) qualify, matching x86 setcc semantics.
 * NOTE(review): the `case` labels and the failure `return -1` path are elided
 * from this listing.
 */
13961 op_to_op_store_membase (int store_opcode, int opcode)
13963 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13966 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13967 return OP_X86_SETEQ_MEMBASE;
13969 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13970 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   If OPCODE's first source operand is produced by LOAD_OPCODE, return a
 * fused *_MEMBASE opcode that reads the operand from memory directly,
 * avoiding the separate load.  x86 handles 32-bit loads; amd64 distinguishes
 * 32-bit (ICOMPARE) from 64-bit (COMPARE) forms and consults
 * cfg->backend->ilp32 to decide how wide OP_LOAD_MEMBASE really is.
 * Returns -1 when no fusion is possible (failure paths elided here).
 * NOTE(review): several `case` labels and `#endif`s are elided from this
 * listing.
 */
13978 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13981 /* FIXME: This has sign extension issues */
13983 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13984 return OP_X86_COMPARE_MEMBASE8_IMM;
13987 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13992 return OP_X86_PUSH_MEMBASE;
13993 case OP_COMPARE_IMM:
13994 case OP_ICOMPARE_IMM:
13995 return OP_X86_COMPARE_MEMBASE_IMM;
13998 return OP_X86_COMPARE_MEMBASE_REG;
14002 #ifdef TARGET_AMD64
14003 /* FIXME: This has sign extension issues */
14005 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14006 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push needs a full 64-bit (or non-ilp32 pointer) load */
14011 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14012 return OP_X86_PUSH_MEMBASE;
14014 /* FIXME: This only works for 32 bit immediates
14015 case OP_COMPARE_IMM:
14016 case OP_LCOMPARE_IMM:
14017 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14018 return OP_AMD64_COMPARE_MEMBASE_IMM;
14020 case OP_ICOMPARE_IMM:
14021 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14022 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* under ilp32 (x32 ABI) a pointer load is only 32 bits wide */
14026 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14027 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14028 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14029 return OP_AMD64_COMPARE_MEMBASE_REG;
14032 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14033 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but for the second source operand: if sreg2
 * is produced by LOAD_OPCODE, return a fused REG_MEMBASE opcode so the memory
 * operand is read in place.  On amd64, 32-bit loads (or pointer loads under
 * ilp32) map to the ICOMPARE/X86_* family while 64-bit loads map to AMD64_*.
 * Returns -1 when no fusion applies (failure paths elided here).
 * NOTE(review): the `case OP_...:` labels are elided from this listing.
 */
14042 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
14045 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14051 return OP_X86_COMPARE_REG_MEMBASE;
14053 return OP_X86_ADD_REG_MEMBASE;
14055 return OP_X86_SUB_REG_MEMBASE;
14057 return OP_X86_AND_REG_MEMBASE;
14059 return OP_X86_OR_REG_MEMBASE;
14061 return OP_X86_XOR_REG_MEMBASE;
14065 #ifdef TARGET_AMD64
14066 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14069 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14071 return OP_X86_ADD_REG_MEMBASE;
14073 return OP_X86_SUB_REG_MEMBASE;
14075 return OP_X86_AND_REG_MEMBASE;
14077 return OP_X86_OR_REG_MEMBASE;
14079 return OP_X86_XOR_REG_MEMBASE;
14081 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14085 return OP_AMD64_COMPARE_REG_MEMBASE;
14087 return OP_AMD64_ADD_REG_MEMBASE;
14089 return OP_AMD64_SUB_REG_MEMBASE;
14091 return OP_AMD64_AND_REG_MEMBASE;
14093 return OP_AMD64_OR_REG_MEMBASE;
14095 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm, but refuse (the early-return cases for the
 * refused opcodes are elided from this listing) to convert opcodes that are
 * software-emulated on this architecture — long shifts on 32-bit registers,
 * and mul/div/rem where the arch emulation macros are defined — since those
 * emulation paths have no immediate form.
 */
14104 mono_op_to_op_imm_noemul (int opcode)
14107 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14113 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14120 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14125 return mono_op_to_op_imm (opcode);
14130 * mono_handle_global_vregs:
14132 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * for them, so later passes (liveness, spilling) treat them as real variables;
 * conversely, variables used in only one bblock are demoted to local vregs.
 * Also compresses cfg->varinfo/cfg->vars after dead variables are flagged.
 * NOTE(review): this listing elides lines (gaps in the embedded numbering):
 * several closing braces, `break`s and `switch` labels are not visible.
 */
14136 mono_handle_global_vregs (MonoCompile *cfg)
14138 gint32 *vreg_to_bb;
14139 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg]: 0 = unseen, block_num+1 = seen in one bb, -1 = global.
 * NOTE(review): `sizeof (gint32*)` looks like it should be `sizeof (gint32)`
 * — the table holds gint32 entries; as written it merely over-allocates on
 * 64-bit hosts (harmless but wasteful).  Confirm against upstream.
 */
14142 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14144 #ifdef MONO_ARCH_SIMD_INTRINSICS
14145 if (cfg->uses_simd_intrinsics)
14146 mono_simd_simplify_indirection (cfg);
14149 /* Find local vregs used in more than one bb */
14150 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14151 MonoInst *ins = bb->code;
14152 int block_num = bb->block_num;
14154 if (cfg->verbose_level > 2)
14155 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14158 for (; ins; ins = ins->next) {
14159 const char *spec = INS_INFO (ins->opcode);
14160 int regtype = 0, regindex;
14163 if (G_UNLIKELY (cfg->verbose_level > 2))
14164 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
14166 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn; ' ' in spec means unused */
14168 for (regindex = 0; regindex < 4; regindex ++) {
14171 if (regindex == 0) {
14172 regtype = spec [MONO_INST_DEST];
14173 if (regtype == ' ')
14176 } else if (regindex == 1) {
14177 regtype = spec [MONO_INST_SRC1];
14178 if (regtype == ' ')
14181 } else if (regindex == 2) {
14182 regtype = spec [MONO_INST_SRC2];
14183 if (regtype == ' ')
14186 } else if (regindex == 3) {
14187 regtype = spec [MONO_INST_SRC3];
14188 if (regtype == ' ')
14193 #if SIZEOF_REGISTER == 4
14194 /* In the LLVM case, the long opcodes are not decomposed */
14195 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14197 * Since some instructions reference the original long vreg,
14198 * and some reference the two component vregs, it is quite hard
14199 * to determine when it needs to be global. So be conservative.
14201 if (!get_vreg_to_inst (cfg, vreg)) {
14202 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14204 if (cfg->verbose_level > 2)
14205 printf ("LONG VREG R%d made global.\n", vreg);
14209 * Make the component vregs volatile since the optimizations can
14210 * get confused otherwise.
14212 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14213 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14217 g_assert (vreg != -1);
14219 prev_bb = vreg_to_bb [vreg];
14220 if (prev_bb == 0) {
14221 /* 0 is a valid block num */
14222 vreg_to_bb [vreg] = block_num + 1;
14223 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never promoted to variables */
14224 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14227 if (!get_vreg_to_inst (cfg, vreg)) {
14228 if (G_UNLIKELY (cfg->verbose_level > 2))
14229 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick a managed type matching regtype (the switch labels are elided here) */
14233 if (vreg_is_ref (cfg, vreg))
14234 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14236 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14239 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14242 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14245 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14248 g_assert_not_reached ();
14252 /* Flag as having been used in more than one bb */
14253 vreg_to_bb [vreg] = -1;
14259 /* If a variable is used in only one bblock, convert it into a local vreg */
14260 for (i = 0; i < cfg->num_varinfo; i++) {
14261 MonoInst *var = cfg->varinfo [i];
14262 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14264 switch (var->type) {
14270 #if SIZEOF_REGISTER == 8
14273 #if !defined(TARGET_X86)
14274 /* Enabling this screws up the fp stack on x86 */
14277 if (mono_arch_is_soft_float ())
14281 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14285 /* Arguments are implicitly global */
14286 /* Putting R4 vars into registers doesn't work currently */
14287 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14288 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14290 * Make that the variable's liveness interval doesn't contain a call, since
14291 * that would cause the lvreg to be spilled, making the whole optimization
14294 /* This is too slow for JIT compilation */
14296 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14298 int def_index, call_index, ins_index;
14299 gboolean spilled = FALSE;
/* NOTE(review): vreg_to_bb is indexed as an instruction list here, which
 * suggests an elided re-use of the array to store MonoBasicBlock* — cannot
 * confirm from this listing. */
14304 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14305 const char *spec = INS_INFO (ins->opcode);
14307 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14308 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm was
 * almost certainly meant to test SRC2/sreg2; as written sreg2 uses are
 * ignored when deciding whether a call splits the live range.  Cannot fix
 * here (surrounding lines elided); confirm against upstream mono. */
14310 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14311 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14312 if (call_index > def_index) {
14318 if (MONO_IS_CALL (ins))
14319 call_index = ins_index;
14329 if (G_UNLIKELY (cfg->verbose_level > 2))
14330 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14331 var->flags |= MONO_INST_IS_DEAD;
14332 cfg->vreg_to_inst [var->dreg] = NULL;
14339 * Compress the varinfo and vars tables so the liveness computation is faster and
14340 * takes up less space.
14343 for (i = 0; i < cfg->num_varinfo; ++i) {
14344 MonoInst *var = cfg->varinfo [i];
14345 if (pos < i && cfg->locals_start == i)
14346 cfg->locals_start = pos;
14347 if (!(var->flags & MONO_INST_IS_DEAD)) {
14349 cfg->varinfo [pos] = cfg->varinfo [i];
14350 cfg->varinfo [pos]->inst_c0 = pos;
14351 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14352 cfg->vars [pos].idx = pos;
14353 #if SIZEOF_REGISTER == 4
14354 if (cfg->varinfo [pos]->type == STACK_I8) {
14355 /* Modify the two component vars too */
14358 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14359 var1->inst_c0 = pos;
14360 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14361 var1->inst_c0 = pos;
14368 cfg->num_varinfo = pos;
14369 if (cfg->locals_start > cfg->num_varinfo)
14370 cfg->locals_start = cfg->num_varinfo;
14374 * mono_allocate_gsharedvt_vars:
14376 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14377 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * The mapping is offset by one so that 0 keeps meaning "no gsharedvt slot";
 * -1 marks a gsharedvt argument passed by reference (no slot of its own).
 * NOTE(review): some closing braces / `else` lines are elided in this listing.
 */
14380 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14384 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14386 for (i = 0; i < cfg->num_varinfo; ++i) {
14387 MonoInst *ins = cfg->varinfo [i];
14390 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* locals get a runtime-info slot; args (below locals_start) are by-ref */
14391 if (i >= cfg->locals_start) {
14393 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14394 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14395 ins->opcode = OP_GSHAREDVT_LOCAL;
14396 ins->inst_imm = idx;
14399 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14400 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14407 * mono_spill_global_vars:
14409 * Generate spill code for variables which are not allocated to registers,
14410 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14411 * code is generated which could be optimized by the local optimization passes.
14414 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14416 MonoBasicBlock *bb;
14418 int orig_next_vreg;
14419 guint32 *vreg_to_lvreg;
14421 guint32 i, lvregs_len;
14422 gboolean dest_has_lvreg = FALSE;
14423 MonoStackType stacktypes [128];
14424 MonoInst **live_range_start, **live_range_end;
14425 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14427 *need_local_opts = FALSE;
14429 memset (spec2, 0, sizeof (spec2));
14431 /* FIXME: Move this function to mini.c */
14432 stacktypes ['i'] = STACK_PTR;
14433 stacktypes ['l'] = STACK_I8;
14434 stacktypes ['f'] = STACK_R8;
14435 #ifdef MONO_ARCH_SIMD_INTRINSICS
14436 stacktypes ['x'] = STACK_VTYPE;
14439 #if SIZEOF_REGISTER == 4
14440 /* Create MonoInsts for longs */
14441 for (i = 0; i < cfg->num_varinfo; i++) {
14442 MonoInst *ins = cfg->varinfo [i];
14444 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14445 switch (ins->type) {
14450 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14453 g_assert (ins->opcode == OP_REGOFFSET);
14455 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14457 tree->opcode = OP_REGOFFSET;
14458 tree->inst_basereg = ins->inst_basereg;
14459 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14461 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14463 tree->opcode = OP_REGOFFSET;
14464 tree->inst_basereg = ins->inst_basereg;
14465 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14475 if (cfg->compute_gc_maps) {
14476 /* registers need liveness info even for !non refs */
14477 for (i = 0; i < cfg->num_varinfo; i++) {
14478 MonoInst *ins = cfg->varinfo [i];
14480 if (ins->opcode == OP_REGVAR)
14481 ins->flags |= MONO_INST_GC_TRACK;
14485 /* FIXME: widening and truncation */
14488 * As an optimization, when a variable allocated to the stack is first loaded into
14489 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14490 * the variable again.
14492 orig_next_vreg = cfg->next_vreg;
14493 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14494 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14498 * These arrays contain the first and last instructions accessing a given
14500 * Since we emit bblocks in the same order we process them here, and we
14501 * don't split live ranges, these will precisely describe the live range of
14502 * the variable, i.e. the instruction range where a valid value can be found
14503 * in the variables location.
14504 * The live range is computed using the liveness info computed by the liveness pass.
14505 * We can't use vmv->range, since that is an abstract live range, and we need
14506 * one which is instruction precise.
14507 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14509 /* FIXME: Only do this if debugging info is requested */
14510 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14511 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14512 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14513 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14515 /* Add spill loads/stores */
14516 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14519 if (cfg->verbose_level > 2)
14520 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14522 /* Clear vreg_to_lvreg array */
14523 for (i = 0; i < lvregs_len; i++)
14524 vreg_to_lvreg [lvregs [i]] = 0;
14528 MONO_BB_FOR_EACH_INS (bb, ins) {
14529 const char *spec = INS_INFO (ins->opcode);
14530 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14531 gboolean store, no_lvreg;
14532 int sregs [MONO_MAX_SRC_REGS];
14534 if (G_UNLIKELY (cfg->verbose_level > 2))
14535 mono_print_ins (ins);
14537 if (ins->opcode == OP_NOP)
14541 * We handle LDADDR here as well, since it can only be decomposed
14542 * when variable addresses are known.
14544 if (ins->opcode == OP_LDADDR) {
14545 MonoInst *var = (MonoInst *)ins->inst_p0;
14547 if (var->opcode == OP_VTARG_ADDR) {
14548 /* Happens on SPARC/S390 where vtypes are passed by reference */
14549 MonoInst *vtaddr = var->inst_left;
14550 if (vtaddr->opcode == OP_REGVAR) {
14551 ins->opcode = OP_MOVE;
14552 ins->sreg1 = vtaddr->dreg;
14554 else if (var->inst_left->opcode == OP_REGOFFSET) {
14555 ins->opcode = OP_LOAD_MEMBASE;
14556 ins->inst_basereg = vtaddr->inst_basereg;
14557 ins->inst_offset = vtaddr->inst_offset;
14560 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14561 /* gsharedvt arg passed by ref */
14562 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14564 ins->opcode = OP_LOAD_MEMBASE;
14565 ins->inst_basereg = var->inst_basereg;
14566 ins->inst_offset = var->inst_offset;
14567 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14568 MonoInst *load, *load2, *load3;
14569 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14570 int reg1, reg2, reg3;
14571 MonoInst *info_var = cfg->gsharedvt_info_var;
14572 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14576 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14579 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14581 g_assert (info_var);
14582 g_assert (locals_var);
14584 /* Mark the instruction used to compute the locals var as used */
14585 cfg->gsharedvt_locals_var_ins = NULL;
14587 /* Load the offset */
14588 if (info_var->opcode == OP_REGOFFSET) {
14589 reg1 = alloc_ireg (cfg);
14590 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14591 } else if (info_var->opcode == OP_REGVAR) {
14593 reg1 = info_var->dreg;
14595 g_assert_not_reached ();
14597 reg2 = alloc_ireg (cfg);
14598 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14599 /* Load the locals area address */
14600 reg3 = alloc_ireg (cfg);
14601 if (locals_var->opcode == OP_REGOFFSET) {
14602 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14603 } else if (locals_var->opcode == OP_REGVAR) {
14604 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14606 g_assert_not_reached ();
14608 /* Compute the address */
14609 ins->opcode = OP_PADD;
14613 mono_bblock_insert_before_ins (bb, ins, load3);
14614 mono_bblock_insert_before_ins (bb, load3, load2);
14616 mono_bblock_insert_before_ins (bb, load2, load);
14618 g_assert (var->opcode == OP_REGOFFSET);
14620 ins->opcode = OP_ADD_IMM;
14621 ins->sreg1 = var->inst_basereg;
14622 ins->inst_imm = var->inst_offset;
14625 *need_local_opts = TRUE;
14626 spec = INS_INFO (ins->opcode);
14629 if (ins->opcode < MONO_CEE_LAST) {
14630 mono_print_ins (ins);
14631 g_assert_not_reached ();
14635 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14639 if (MONO_IS_STORE_MEMBASE (ins)) {
14640 tmp_reg = ins->dreg;
14641 ins->dreg = ins->sreg2;
14642 ins->sreg2 = tmp_reg;
14645 spec2 [MONO_INST_DEST] = ' ';
14646 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14647 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14648 spec2 [MONO_INST_SRC3] = ' ';
14650 } else if (MONO_IS_STORE_MEMINDEX (ins))
14651 g_assert_not_reached ();
14656 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14657 printf ("\t %.3s %d", spec, ins->dreg);
14658 num_sregs = mono_inst_get_src_registers (ins, sregs);
14659 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14660 printf (" %d", sregs [srcindex]);
14667 regtype = spec [MONO_INST_DEST];
14668 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14671 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14672 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14673 MonoInst *store_ins;
14675 MonoInst *def_ins = ins;
14676 int dreg = ins->dreg; /* The original vreg */
14678 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14680 if (var->opcode == OP_REGVAR) {
14681 ins->dreg = var->dreg;
14682 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14684 * Instead of emitting a load+store, use a _membase opcode.
14686 g_assert (var->opcode == OP_REGOFFSET);
14687 if (ins->opcode == OP_MOVE) {
14691 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14692 ins->inst_basereg = var->inst_basereg;
14693 ins->inst_offset = var->inst_offset;
14696 spec = INS_INFO (ins->opcode);
14700 g_assert (var->opcode == OP_REGOFFSET);
14702 prev_dreg = ins->dreg;
14704 /* Invalidate any previous lvreg for this vreg */
14705 vreg_to_lvreg [ins->dreg] = 0;
14709 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14711 store_opcode = OP_STOREI8_MEMBASE_REG;
14714 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14716 #if SIZEOF_REGISTER != 8
14717 if (regtype == 'l') {
14718 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14719 mono_bblock_insert_after_ins (bb, ins, store_ins);
14720 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14721 mono_bblock_insert_after_ins (bb, ins, store_ins);
14722 def_ins = store_ins;
14727 g_assert (store_opcode != OP_STOREV_MEMBASE);
14729 /* Try to fuse the store into the instruction itself */
14730 /* FIXME: Add more instructions */
14731 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14732 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14733 ins->inst_imm = ins->inst_c0;
14734 ins->inst_destbasereg = var->inst_basereg;
14735 ins->inst_offset = var->inst_offset;
14736 spec = INS_INFO (ins->opcode);
14737 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14738 ins->opcode = store_opcode;
14739 ins->inst_destbasereg = var->inst_basereg;
14740 ins->inst_offset = var->inst_offset;
14744 tmp_reg = ins->dreg;
14745 ins->dreg = ins->sreg2;
14746 ins->sreg2 = tmp_reg;
14749 spec2 [MONO_INST_DEST] = ' ';
14750 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14751 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14752 spec2 [MONO_INST_SRC3] = ' ';
14754 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14755 // FIXME: The backends expect the base reg to be in inst_basereg
14756 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14758 ins->inst_basereg = var->inst_basereg;
14759 ins->inst_offset = var->inst_offset;
14760 spec = INS_INFO (ins->opcode);
14762 /* printf ("INS: "); mono_print_ins (ins); */
14763 /* Create a store instruction */
14764 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14766 /* Insert it after the instruction */
14767 mono_bblock_insert_after_ins (bb, ins, store_ins);
14769 def_ins = store_ins;
14772 * We can't assign ins->dreg to var->dreg here, since the
14773 * sregs could use it. So set a flag, and do it after
14776 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14777 dest_has_lvreg = TRUE;
14782 if (def_ins && !live_range_start [dreg]) {
14783 live_range_start [dreg] = def_ins;
14784 live_range_start_bb [dreg] = bb;
14787 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14790 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14791 tmp->inst_c1 = dreg;
14792 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14799 num_sregs = mono_inst_get_src_registers (ins, sregs);
14800 for (srcindex = 0; srcindex < 3; ++srcindex) {
14801 regtype = spec [MONO_INST_SRC1 + srcindex];
14802 sreg = sregs [srcindex];
14804 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14805 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14806 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14807 MonoInst *use_ins = ins;
14808 MonoInst *load_ins;
14809 guint32 load_opcode;
14811 if (var->opcode == OP_REGVAR) {
14812 sregs [srcindex] = var->dreg;
14813 //mono_inst_set_src_registers (ins, sregs);
14814 live_range_end [sreg] = use_ins;
14815 live_range_end_bb [sreg] = bb;
14817 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14820 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14821 /* var->dreg is a hreg */
14822 tmp->inst_c1 = sreg;
14823 mono_bblock_insert_after_ins (bb, ins, tmp);
14829 g_assert (var->opcode == OP_REGOFFSET);
14831 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14833 g_assert (load_opcode != OP_LOADV_MEMBASE);
14835 if (vreg_to_lvreg [sreg]) {
14836 g_assert (vreg_to_lvreg [sreg] != -1);
14838 /* The variable is already loaded to an lvreg */
14839 if (G_UNLIKELY (cfg->verbose_level > 2))
14840 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14841 sregs [srcindex] = vreg_to_lvreg [sreg];
14842 //mono_inst_set_src_registers (ins, sregs);
14846 /* Try to fuse the load into the instruction */
14847 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14848 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14849 sregs [0] = var->inst_basereg;
14850 //mono_inst_set_src_registers (ins, sregs);
14851 ins->inst_offset = var->inst_offset;
14852 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14853 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14854 sregs [1] = var->inst_basereg;
14855 //mono_inst_set_src_registers (ins, sregs);
14856 ins->inst_offset = var->inst_offset;
14858 if (MONO_IS_REAL_MOVE (ins)) {
14859 ins->opcode = OP_NOP;
14862 //printf ("%d ", srcindex); mono_print_ins (ins);
14864 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14866 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14867 if (var->dreg == prev_dreg) {
14869 * sreg refers to the value loaded by the load
14870 * emitted below, but we need to use ins->dreg
14871 * since it refers to the store emitted earlier.
14875 g_assert (sreg != -1);
14876 vreg_to_lvreg [var->dreg] = sreg;
14877 g_assert (lvregs_len < 1024);
14878 lvregs [lvregs_len ++] = var->dreg;
14882 sregs [srcindex] = sreg;
14883 //mono_inst_set_src_registers (ins, sregs);
14885 #if SIZEOF_REGISTER != 8
14886 if (regtype == 'l') {
14887 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14888 mono_bblock_insert_before_ins (bb, ins, load_ins);
14889 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14890 mono_bblock_insert_before_ins (bb, ins, load_ins);
14891 use_ins = load_ins;
14896 #if SIZEOF_REGISTER == 4
14897 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14899 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14900 mono_bblock_insert_before_ins (bb, ins, load_ins);
14901 use_ins = load_ins;
14905 if (var->dreg < orig_next_vreg) {
14906 live_range_end [var->dreg] = use_ins;
14907 live_range_end_bb [var->dreg] = bb;
14910 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14913 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14914 tmp->inst_c1 = var->dreg;
14915 mono_bblock_insert_after_ins (bb, ins, tmp);
14919 mono_inst_set_src_registers (ins, sregs);
14921 if (dest_has_lvreg) {
14922 g_assert (ins->dreg != -1);
14923 vreg_to_lvreg [prev_dreg] = ins->dreg;
14924 g_assert (lvregs_len < 1024);
14925 lvregs [lvregs_len ++] = prev_dreg;
14926 dest_has_lvreg = FALSE;
14930 tmp_reg = ins->dreg;
14931 ins->dreg = ins->sreg2;
14932 ins->sreg2 = tmp_reg;
14935 if (MONO_IS_CALL (ins)) {
14936 /* Clear vreg_to_lvreg array */
14937 for (i = 0; i < lvregs_len; i++)
14938 vreg_to_lvreg [lvregs [i]] = 0;
14940 } else if (ins->opcode == OP_NOP) {
14942 MONO_INST_NULLIFY_SREGS (ins);
14945 if (cfg->verbose_level > 2)
14946 mono_print_ins_index (1, ins);
14949 /* Extend the live range based on the liveness info */
14950 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14951 for (i = 0; i < cfg->num_varinfo; i ++) {
14952 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14954 if (vreg_is_volatile (cfg, vi->vreg))
14955 /* The liveness info is incomplete */
14958 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14959 /* Live from at least the first ins of this bb */
14960 live_range_start [vi->vreg] = bb->code;
14961 live_range_start_bb [vi->vreg] = bb;
14964 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14965 /* Live at least until the last ins of this bb */
14966 live_range_end [vi->vreg] = bb->last_ins;
14967 live_range_end_bb [vi->vreg] = bb;
14974 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14975 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14977 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14978 for (i = 0; i < cfg->num_varinfo; ++i) {
14979 int vreg = MONO_VARINFO (cfg, i)->vreg;
14982 if (live_range_start [vreg]) {
14983 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14985 ins->inst_c1 = vreg;
14986 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14988 if (live_range_end [vreg]) {
14989 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14991 ins->inst_c1 = vreg;
14992 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14993 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14995 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15000 if (cfg->gsharedvt_locals_var_ins) {
15001 /* Nullify if unused */
15002 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15003 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15006 g_free (live_range_start);
15007 g_free (live_range_end);
15008 g_free (live_range_start_bb);
15009 g_free (live_range_end_bb);
15014 * - use 'iadd' instead of 'int_add'
15015 * - handling ovf opcodes: decompose in method_to_ir.
15016 * - unify iregs/fregs
15017 * -> partly done, the missing parts are:
15018 * - a more complete unification would involve unifying the hregs as well, so
15019 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15020 * would no longer map to the machine hregs, so the code generators would need to
15021 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15022 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15023 * fp/non-fp branches speeds it up by about 15%.
15024 * - use sext/zext opcodes instead of shifts
15026 * - get rid of TEMPLOADs if possible and use vregs instead
15027 * - clean up usage of OP_P/OP_ opcodes
15028 * - cleanup usage of DUMMY_USE
15029 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15031 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15032 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15033 * - make sure handle_stack_args () is called before the branch is emitted
15034 * - when the new IR is done, get rid of all unused stuff
15035 * - COMPARE/BEQ as separate instructions or unify them ?
15036 * - keeping them separate allows specialized compare instructions like
15037 * compare_imm, compare_membase
15038 * - most back ends unify fp compare+branch, fp compare+ceq
15039 * - integrate mono_save_args into inline_method
15040 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15041 * - handle long shift opts on 32 bit platforms somehow: they require
15042 * 3 sregs (2 for arg1 and 1 for arg2)
15043 * - make byref a 'normal' type.
15044 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15045 * variable if needed.
15046 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15047 * like inline_method.
15048 * - remove inlining restrictions
15049 * - fix LNEG and enable cfold of INEG
15050 * - generalize x86 optimizations like ldelema as a peephole optimization
15051 * - add store_mem_imm for amd64
15052 * - optimize the loading of the interruption flag in the managed->native wrappers
15053 * - avoid special handling of OP_NOP in passes
15054 * - move code inserting instructions into one function/macro.
15055 * - try a coalescing phase after liveness analysis
15056 * - add float -> vreg conversion + local optimizations on !x86
15057 * - figure out how to handle decomposed branches during optimizations, ie.
15058 * compare+branch, op_jump_table+op_br etc.
15059 * - promote RuntimeXHandles to vregs
15060 * - vtype cleanups:
15061 * - add a NEW_VARLOADA_VREG macro
15062 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15063 * accessing vtype fields.
15064 * - get rid of I8CONST on 64 bit platforms
15065 * - dealing with the increase in code size due to branches created during opcode
15067 * - use extended basic blocks
15068 * - all parts of the JIT
15069 * - handle_global_vregs () && local regalloc
15070 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15071 * - sources of increase in code size:
15074 * - isinst and castclass
15075 * - lvregs not allocated to global registers even if used multiple times
15076 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15078 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15079 * - add all micro optimizations from the old JIT
15080 * - put tree optimizations into the deadce pass
15081 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15082 * specific function.
15083 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15084 * fcompare + branchCC.
15085 * - create a helper function for allocating a stack slot, taking into account
15086 * MONO_CFG_HAS_SPILLUP.
15088 * - merge the ia64 switch changes.
15089 * - optimize mono_regstate2_alloc_int/float.
15090 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15091 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15092 * parts of the tree could be separated by other instructions, killing the tree
15093 * arguments, or stores killing loads etc. Also, should we fold loads into other
15094 * instructions if the result of the load is used multiple times ?
15095 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15096 * - LAST MERGE: 108395.
15097 * - when returning vtypes in registers, generate IR and append it to the end of the
15098 * last bb instead of doing it in the epilog.
15099 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15107 - When to decompose opcodes:
15108 - earlier: this makes some optimizations hard to implement, since the low level IR
15109 no longer contains the necessary information. But it is easier to do.
15110 - later: harder to implement, enables more optimizations.
15111 - Branches inside bblocks:
15112 - created when decomposing complex opcodes.
15113 - branches to another bblock: harmless, but not tracked by the branch
15114 optimizations, so need to branch to a label at the start of the bblock.
15115 - branches to inside the same bblock: very problematic, trips up the local
15116 reg allocator. Can be fixed by splitting the current bblock, but that is a
15117 complex operation, since some local vregs can become global vregs etc.
15118 - Local/global vregs:
15119 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15120 local register allocator.
15121 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15122 structure, created by mono_create_var (). Assigned to hregs or the stack by
15123 the global register allocator.
15124 - When to do optimizations like alu->alu_imm:
15125 - earlier -> saves work later on since the IR will be smaller/simpler
15126 - later -> can work on more instructions
15127 - Handling of valuetypes:
15128 - When a vtype is pushed on the stack, a new temporary is created, an
15129 instruction computing its address (LDADDR) is emitted and pushed on
15130 the stack. Need to optimize cases when the vtype is used immediately as in
15131 argument passing, stloc etc.
15132 - Instead of the to_end stuff in the old JIT, simply call the function handling
15133 the values on the stack before emitting the last instruction of the bb.
15136 #endif /* DISABLE_JIT */