2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
166 * Instruction metadata
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
206 #define MONO_INIT_VARINFO(vi,id) do { \
207 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the static alloc_*reg () helpers so other
 * compilation units can allocate JIT virtual registers of each kind
 * (integer, long, float, pointer-sized, destination-by-stack-type).
 * NOTE(review): function bodies are fragmentary in this listing; the
 * return types and braces are not visible here.
 */
213 mono_alloc_ireg (MonoCompile *cfg)
215 return alloc_ireg (cfg);
219 mono_alloc_lreg (MonoCompile *cfg)
221 return alloc_lreg (cfg);
225 mono_alloc_freg (MonoCompile *cfg)
227 return alloc_freg (cfg);
231 mono_alloc_preg (MonoCompile *cfg)
233 return alloc_preg (cfg);
/* Allocates the destination register whose kind matches STACK_TYPE. */
237 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
239 return alloc_dreg (cfg, stack_type);
243 * mono_alloc_ireg_ref:
245 * Allocate an IREG, and mark it as holding a GC ref.
248 mono_alloc_ireg_ref (MonoCompile *cfg)
250 return alloc_ireg_ref (cfg);
254 * mono_alloc_ireg_mp:
256 * Allocate an IREG, and mark it as holding a managed pointer.
259 mono_alloc_ireg_mp (MonoCompile *cfg)
261 return alloc_ireg_mp (cfg);
265 * mono_alloc_ireg_copy:
267 * Allocate an IREG with the same GC type as VREG.
/*
 * Propagates GC-tracking info: if VREG is known to hold a GC reference
 * or a managed pointer, the new register is allocated with the same
 * marking; otherwise a plain integer register is returned.
 */
270 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
272 if (vreg_is_ref (cfg, vreg))
273 return alloc_ireg_ref (cfg);
274 else if (vreg_is_mp (cfg, vreg))
275 return alloc_ireg_mp (cfg);
/* Neither ref nor managed pointer: ordinary integer register. */
277 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Maps a MonoType to the OP_*MOVE opcode used to copy a value of that
 * type between registers. Enums and generic instances are unwrapped to
 * their underlying type; type variables recurse after resolution.
 * NOTE(review): many case bodies are missing from this listing, so the
 * default opcode returned for the integer/object cases is not visible.
 */
281 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
286 type = mini_get_underlying_type (type);
288 switch (type->type) {
301 case MONO_TYPE_FNPTR:
303 case MONO_TYPE_CLASS:
304 case MONO_TYPE_STRING:
305 case MONO_TYPE_OBJECT:
306 case MONO_TYPE_SZARRAY:
307 case MONO_TYPE_ARRAY:
311 #if SIZEOF_REGISTER == 8
/* R4 moves use OP_RMOVE only when the backend keeps float32 in float regs. */
317 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
320 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
321 if (type->data.klass->enumtype) {
322 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types presumably get a vector move — TODO confirm (body not visible). */
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 case MONO_TYPE_TYPEDBYREF:
330 case MONO_TYPE_GENERICINST:
/* Inflated types are handled via the generic container's open type. */
331 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only appear under generic sharing. */
335 g_assert (cfg->gshared);
336 if (mini_type_var_is_vt (type))
339 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
341 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of one basic block: prints MSG, the block number, the
 * IN/OUT edge lists (block number plus depth-first number), then each
 * instruction in the block via mono_print_ins_index ().
 */
347 mono_print_bb (MonoBasicBlock *bb, const char *msg)
352 printf ("\n%s %d: [IN: ", msg, bb->block_num);
353 for (i = 0; i < bb->in_count; ++i)
354 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
356 for (i = 0; i < bb->out_count; ++i)
357 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
359 for (tree = bb->code; tree; tree = tree->next)
360 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * One-time initialization of the cached icall signatures declared above
 * (domain getter, rgctx lazy-fetch trampoline, LLVM-only IMT thunk).
 * Signature strings are "<ret> <args...>" in icall-signature notation.
 */
364 mono_create_helper_signatures (void)
366 helper_sig_domain_get = mono_create_icall_signature ("ptr");
367 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
368 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 * Debugging hook: when the --break-on-unverified debug option is set,
 * gives the debugger a chance to stop when unverifiable IL is found.
 * MONO_NEVER_INLINE keeps these cold failure paths out of callers.
 */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 * Records a MethodAccessException on CFG: CIL_METHOD is not accessible
 * from METHOD. Full names are heap-allocated and freed after the error
 * message has been formatted (mono_error_set_* copies the string).
 */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
384 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/*
 * field_access_failure:
 * Same as method_access_failure but for an inaccessible field; records
 * a FieldAccessException on CFG.
 */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
395 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/*
 * inline_failure:
 * Cold path used by the INLINE_FAILURE macro: optionally logs MSG and
 * marks the compilation as failed-to-inline so the caller can bail out.
 */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 * Cold path for GENERIC_SHARING_FAILURE: logs which method/opcode could
 * not be compiled under generic sharing, then flags the cfg so it is
 * recompiled without sharing.
 * NOTE(review): the trailing '\' on the `if` line below is a stray
 * line-continuation (likely left over from when this was a macro); it
 * merely splices the two lines and does not change behavior, but it
 * should be removed upstream.
 */
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 * Same idea for gsharedvt (variable-sized generic sharing): stores a
 * descriptive message on the cfg (ownership passes to cfg) and flags
 * the compilation to fall back to a concrete instantiation.
 */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 * Normalizes the operand types of binary INS in place. Two cases are
 * visible here: (1) mixed R4/R8 operands — the R4 side is widened to R8
 * with OP_RCONV_TO_R8 (the ECMA spec allows mixing float sizes); and
 * (2) on 64-bit targets, an I4 operand paired with a PTR operand is
 * sign-extended with OP_SEXT_I4. The arg pointers are passed by
 * reference so the widened instruction can replace the original operand.
 */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
/* Sign-extend the 32-bit operand so it can combine with a native-width value. */
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins); \
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, cfg->cbb, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, cfg->cbb, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (cfg->cbb, cmp); \
559 MONO_ADD_INS (cfg->cbb, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
/*
 * Adds TO to FROM's out-edge list and FROM to TO's in-edge list,
 * skipping the insertion if the edge already exists. Edge arrays are
 * grown by allocating a new array of (count + 1) from the cfg mempool
 * and copying the old entries (mempool memory is never freed
 * individually). The printf calls are verbose-mode edge tracing; a
 * NULL cil_code identifies the synthetic entry/exit blocks.
 */
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* Avoid duplicate out-edges. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
597 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Avoid duplicate in-edges. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
/*
 * Token layout visible below: ((clause_index + 1) << 8) | region_kind |
 * clause_flags. Handler regions (filter/finally/fault/catch) are
 * checked before plain try-clause membership, so the innermost handler
 * wins over an enclosing try body.
 */
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler body? */
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: OFFSET inside the protected (try) range of a clause. */
666 for (i = 0; i < header->num_clauses; ++i) {
667 clause = &header->clauses [i];
669 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
670 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collects (into a GList) every exception clause of kind TYPE whose
 * protected range contains IP but not TARGET — i.e. the clauses whose
 * handlers must run when control transfers from IP to TARGET (e.g. the
 * finally blocks a leave instruction must execute). Caller presumably
 * owns the returned list — TODO confirm (free path not visible here).
 */
677 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
679 MonoMethodHeader *header = cfg->header;
680 MonoExceptionClause *clause;
684 for (i = 0; i < header->num_clauses; ++i) {
685 clause = &header->clauses [i];
686 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
687 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
688 if (clause->flags == type)
689 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Returns (creating and caching on first use) the stack-pointer spill
 * variable associated with exception REGION. The var is keyed in
 * cfg->spvars and marked MONO_INST_VOLATILE so the register allocator
 * leaves it in memory.
 */
696 mono_create_spvar_for_region (MonoCompile *cfg, int region)
700 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
704 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
705 /* prevent it from being register allocated */
706 var->flags |= MONO_INST_VOLATILE;
708 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Looks up (without creating) the exception-object var for a handler offset. */
712 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
714 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Same as above but creates the variable on first use: an object-typed
 * local holding the in-flight exception for the handler at OFFSET,
 * cached in cfg->exvars and kept out of registers.
 */
718 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
722 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
726 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
727 /* prevent it from being register allocated */
728 var->flags |= MONO_INST_VOLATILE;
730 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
736 * Returns the type used in the eval stack when @type is loaded.
737 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets INST->type (and INST->klass) to the evaluation-stack category
 * (STACK_I4/I8/PTR/OBJ/R4/R8/MP/VTYPE/INV) corresponding to TYPE.
 * Enums and generic instances are unwrapped; gsharedvt type variables
 * become STACK_VTYPE; everything else recurses on the underlying type.
 */
740 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
744 type = mini_get_underlying_type (type);
745 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments load as managed pointers — presumably the branch guarding
 * this is the not-visible `if (type->byref)` check; TODO confirm. */
747 inst->type = STACK_MP;
752 switch (type->type) {
754 inst->type = STACK_INV;
762 inst->type = STACK_I4;
767 case MONO_TYPE_FNPTR:
768 inst->type = STACK_PTR;
770 case MONO_TYPE_CLASS:
771 case MONO_TYPE_STRING:
772 case MONO_TYPE_OBJECT:
773 case MONO_TYPE_SZARRAY:
774 case MONO_TYPE_ARRAY:
775 inst->type = STACK_OBJ;
779 inst->type = STACK_I8;
/* R4 may be STACK_R4 or STACK_R8 depending on backend float handling. */
782 inst->type = cfg->r4_stack_type;
785 inst->type = STACK_R8;
787 case MONO_TYPE_VALUETYPE:
788 if (type->data.klass->enumtype) {
789 type = mono_class_enum_basetype (type->data.klass);
793 inst->type = STACK_VTYPE;
796 case MONO_TYPE_TYPEDBYREF:
797 inst->klass = mono_defaults.typed_reference_class;
798 inst->type = STACK_VTYPE;
800 case MONO_TYPE_GENERICINST:
801 type = &type->data.generic_class->container_class->byval_arg;
805 g_assert (cfg->gshared);
/* Under gsharedvt, a variable-sized type variable is treated as a vtype. */
806 if (mini_is_gsharedvt_type (type)) {
807 g_assert (cfg->gsharedvt);
808 inst->type = STACK_VTYPE;
810 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
814 g_error ("unknown type 0x%02x in eval stack type", type->type);
819 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * All of these tables are indexed by evaluation-stack type (STACK_INV,
 * I4, I8, PTR, R8, MP, OBJ, VTYPE, R4 — in that order). Rows with
 * fewer initializers than STACK_MAX rely on C zero-initialization of
 * the remainder (STACK_INV / 0 presumably being the zero value — TODO
 * confirm against the enum in mini.h).
 */
/* Result type of a numeric binop (add/sub/...) per operand-type pair. */
822 bin_num_table [STACK_MAX] [STACK_MAX] = {
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
828 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand type. */
836 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
839 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
841 bin_int_table [STACK_MAX] [STACK_MAX] = {
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values > 1
 * presumably encode restricted/unverifiable cases — TODO confirm). */
853 bin_comp_table [STACK_MAX] [STACK_MAX] = {
854 /* Inv i L p F & O vt r4 */
856 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
857 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
858 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
859 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
860 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
861 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
862 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
863 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
866 /* reduce the size of this table */
/* Result type of shift ops: result follows the value operand (row). */
868 shift_table [STACK_MAX] [STACK_MAX] = {
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
880 * Tables to map from the non-specific opcode to the matching
881 * type-specific opcode.
/* Each entry is an opcode DELTA added to the generic CEE_/OP_ opcode to
 * obtain the type-specialized IR opcode for that stack type. */
883 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
885 binops_op_map [STACK_MAX] = {
886 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
889 /* handles from CEE_NEG to CEE_CONV_U8 */
891 unops_op_map [STACK_MAX] = {
892 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
895 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
897 ovfops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
901 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
903 ovf2ops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
907 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
909 ovf3ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
913 /* handles from CEE_BEQ to CEE_BLT_UN */
915 beqops_op_map [STACK_MAX] = {
916 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
919 /* handles from CEE_CEQ to CEE_CLT_UN */
921 ceqops_op_map [STACK_MAX] = {
922 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
926 * Sets ins->type (the type on the eval stack) according to the
927 * type of the opcode and the arguments to it.
928 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
930 * FIXME: this function sets ins->type unconditionally in some cases, but
931 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 * Central opcode-typing switch: given generic opcode INS and its stack
 * operands SRC1/SRC2, computes the result stack type and rewrites
 * INS->opcode into the type-specialized IR opcode by adding the deltas
 * from the *_op_map tables above. SRC2 may be NULL for unary ops.
 * NOTE(review): many case labels and fallthrough lines are missing from
 * this listing; comments below describe only the visible arms.
 */
934 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
936 switch (ins->opcode) {
/* Numeric binops: validate via bin_num_table, specialize via binops_op_map. */
943 /* FIXME: check unverifiable args for STACK_MP */
944 ins->type = bin_num_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor...). */
952 ins->type = bin_int_table [src1->type] [src2->type];
953 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted value. */
958 ins->type = shift_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* Compare: choose L/R/F/I variant from the first operand's width/kind. */
964 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
965 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
966 ins->opcode = OP_LCOMPARE;
967 else if (src1->type == STACK_R4)
968 ins->opcode = OP_RCOMPARE;
969 else if (src1->type == STACK_R8)
970 ins->opcode = OP_FCOMPARE;
972 ins->opcode = OP_ICOMPARE;
974 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1 twice (not src1/src2);
 * presumably intentional for the immediate form — confirm upstream. */
975 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
976 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
977 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
989 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt family: result is I4, validity from bin_comp_table. */
992 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
993 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare-set variants accept only entries flagged with bit 0. */
999 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1000 ins->opcode += ceqops_op_map [src1->type];
/* neg/not. */
1004 ins->type = neg_table [src1->type];
1005 ins->opcode += unops_op_map [ins->type];
1008 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1009 ins->type = src1->type;
1011 ins->type = STACK_INV;
1012 ins->opcode += unops_op_map [ins->type];
/* conv to small ints: result is I4. */
1018 ins->type = STACK_I4;
1019 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
1022 ins->type = STACK_R8;
1023 switch (src1->type) {
1026 ins->opcode = OP_ICONV_TO_R_UN;
1029 ins->opcode = OP_LCONV_TO_R_UN;
/* Checked (overflow-trapping) conversions to 32-bit. */
1033 case CEE_CONV_OVF_I1:
1034 case CEE_CONV_OVF_U1:
1035 case CEE_CONV_OVF_I2:
1036 case CEE_CONV_OVF_U2:
1037 case CEE_CONV_OVF_I4:
1038 case CEE_CONV_OVF_U4:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf3ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I_UN:
1043 case CEE_CONV_OVF_U_UN:
1044 ins->type = STACK_PTR;
1045 ins->opcode += ovf2ops_op_map [src1->type];
1047 case CEE_CONV_OVF_I1_UN:
1048 case CEE_CONV_OVF_I2_UN:
1049 case CEE_CONV_OVF_I4_UN:
1050 case CEE_CONV_OVF_U1_UN:
1051 case CEE_CONV_OVF_U2_UN:
1052 case CEE_CONV_OVF_U4_UN:
1053 ins->type = STACK_I4;
1054 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-width unsigned conversion, width-dependent opcode. */
1057 ins->type = STACK_PTR;
1058 switch (src1->type) {
1060 ins->opcode = OP_ICONV_TO_U;
1064 #if SIZEOF_VOID_P == 8
1065 ins->opcode = OP_LCONV_TO_U;
/* On 32-bit, a pointer-sized value is already the right width. */
1067 ins->opcode = OP_MOVE;
1071 ins->opcode = OP_LCONV_TO_U;
1074 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit. */
1080 ins->type = STACK_I8;
1081 ins->opcode += unops_op_map [src1->type];
1083 case CEE_CONV_OVF_I8:
1084 case CEE_CONV_OVF_U8:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf3ops_op_map [src1->type];
1088 case CEE_CONV_OVF_U8_UN:
1089 case CEE_CONV_OVF_I8_UN:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1094 ins->type = cfg->r4_stack_type;
1095 ins->opcode += unops_op_map [src1->type];
1098 ins->type = STACK_R8;
1099 ins->opcode += unops_op_map [src1->type];
1102 ins->type = STACK_R8;
1106 ins->type = STACK_I4;
1107 ins->opcode += ovfops_op_map [src1->type];
1110 case CEE_CONV_OVF_I:
1111 case CEE_CONV_OVF_U:
1112 ins->type = STACK_PTR;
1113 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1116 case CEE_ADD_OVF_UN:
1118 case CEE_MUL_OVF_UN:
1120 case CEE_SUB_OVF_UN:
1121 ins->type = bin_num_table [src1->type] [src2->type];
1122 ins->opcode += ovfops_op_map [src1->type];
1123 if (ins->type == STACK_R8)
1124 ins->type = STACK_INV;
/* Memory loads: result type fixed by the load width. */
1126 case OP_LOAD_MEMBASE:
1127 ins->type = STACK_PTR;
1129 case OP_LOADI1_MEMBASE:
1130 case OP_LOADU1_MEMBASE:
1131 case OP_LOADI2_MEMBASE:
1132 case OP_LOADU2_MEMBASE:
1133 case OP_LOADI4_MEMBASE:
1134 case OP_LOADU4_MEMBASE:
1135 ins->type = STACK_PTR;
1137 case OP_LOADI8_MEMBASE:
1138 ins->type = STACK_I8;
1140 case OP_LOADR4_MEMBASE:
1141 ins->type = cfg->r4_stack_type;
1143 case OP_LOADR8_MEMBASE:
1144 ins->type = STACK_R8;
1147 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1151 if (ins->type == STACK_MP)
1152 ins->klass = mono_defaults.object_class;
/* Map from MonoTypeEnum-ish index to eval-stack type (table fragment). */
1157 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1163 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Verifier-style compatibility check between actual call arguments
 * (ARGS, typed by eval-stack category) and the formal parameter types
 * in SIG, including THIS_INS for instance calls. The visible logic
 * rejects byref mismatches and non-floating formals for float args;
 * the return convention (0 = incompatible, presumably) is not fully
 * visible here.
 */
1168 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1173 switch (args->type) {
1183 for (i = 0; i < sig->param_count; ++i) {
1184 switch (args [i].type) {
/* Value on the stack is a plain value but the formal is byref (or vice versa). */
1188 if (!sig->params [i]->byref)
1192 if (sig->params [i]->byref)
1194 switch (sig->params [i]->type) {
1195 case MONO_TYPE_CLASS:
1196 case MONO_TYPE_STRING:
1197 case MONO_TYPE_OBJECT:
1198 case MONO_TYPE_SZARRAY:
1199 case MONO_TYPE_ARRAY:
/* Float on the stack must match an R4/R8 non-byref formal. */
1206 if (sig->params [i]->byref)
1208 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1217 /*if (!param_table [args [i].type] [sig->params [i]->type])
1225 * When we need a pointer to the current domain many times in a method, we
1226 * call mono_domain_get() once and we store the result in a local variable.
1227 * This function returns the variable that represents the MonoDomain*.
1229 inline static MonoInst *
1230 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created, then cached on the cfg for the rest of the compile. */
1232 if (!cfg->domainvar)
1233 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1234 return cfg->domainvar;
1238 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Returns the cached GOT-address variable, creating it on first use.
 * Only meaningful for AOT compilation on backends that need an explicit
 * got_var; otherwise the early return presumably yields NULL (the
 * return value on that path is not visible here).
 */
1242 mono_get_got_var (MonoCompile *cfg)
1244 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1246 if (!cfg->got_var) {
1247 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 return cfg->got_var;
/*
 * Returns the cached rgctx (runtime generic context) variable for a
 * generic-shared method, creating it on first use. Marked volatile so
 * it stays stack-allocated and can be located by the EH machinery.
 */
1253 mono_get_vtable_var (MonoCompile *cfg)
1255 g_assert (cfg->gshared);
1257 if (!cfg->rgctx_var) {
1258 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 /* force the var to be stack allocated */
1260 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1263 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Inverse-ish of type_to_eval_stack_type: maps an instruction's
 * eval-stack category back to a representative MonoType*. MP uses the
 * instruction's klass this_arg; OBJ collapses to System.Object; VTYPE
 * uses the instruction's klass byval type. Unknown categories abort.
 */
1267 type_from_stack_type (MonoInst *ins) {
1268 switch (ins->type) {
1269 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1270 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1271 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1272 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1273 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref form of the instruction's class. */
1275 return &ins->klass->this_arg;
1276 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1277 case STACK_VTYPE: return &ins->klass->byval_arg;
1279 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type: map a MonoType to its STACK_* category.
 * Enums are first resolved to their underlying type. R4 maps to
 * cfg->r4_stack_type (backend-dependent). Generic insts that are valuetypes
 * fall through to the valuetype handling.
 */
1284 static G_GNUC_UNUSED int
1285 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1287 t = mono_type_get_underlying_type (t);
1299 case MONO_TYPE_FNPTR:
1301 case MONO_TYPE_CLASS:
1302 case MONO_TYPE_STRING:
1303 case MONO_TYPE_OBJECT:
1304 case MONO_TYPE_SZARRAY:
1305 case MONO_TYPE_ARRAY:
1311 return cfg->r4_stack_type;
1314 case MONO_TYPE_VALUETYPE:
1315 case MONO_TYPE_TYPEDBYREF:
1317 case MONO_TYPE_GENERICINST:
1318 if (mono_type_generic_inst_is_valuetype (t))
1324 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element class implied by a CIL ldelem/stelem opcode
 * (e.g. CEE_LDELEM_REF -> object_class). Unhandled opcodes assert.
 */
1331 array_access_to_klass (int opcode)
1335 return mono_defaults.byte_class;
1337 return mono_defaults.uint16_class;
1340 return mono_defaults.int_class;
1343 return mono_defaults.sbyte_class;
1346 return mono_defaults.int16_class;
1349 return mono_defaults.int32_class;
1351 return mono_defaults.uint32_class;
1354 return mono_defaults.int64_class;
1357 return mono_defaults.single_class;
1360 return mono_defaults.double_class;
1361 case CEE_LDELEM_REF:
1362 case CEE_STELEM_REF:
1363 return mono_defaults.object_class;
1365 g_assert_not_reached ();
1371 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable used to carry a stack slot's value across basic
 * blocks. Variables are shared via cfg->intvars, keyed by (stack type, slot),
 * except for deep stacks from inlining (always get a fresh var).
 */
1374 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1379 /* inlining can result in deeper stacks */
1380 if (slot >= cfg->header->max_stack)
1381 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index into the shared-variable cache: one entry per (type, slot) pair */
1383 pos = ins->type - 1 + slot * STACK_MAX;
1385 switch (ins->type) {
1392 if ((vnum = cfg->intvars [pos]))
1393 return cfg->varinfo [vnum];
1394 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1395 cfg->intvars [pos] = res->inst_c0;
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash so the AOT compiler can later re-resolve it.
 * Skipped when a generic context is active, since image+token alone would
 * not be enough to look the item up (see comment below).
 */
1404 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1407 * Don't use this if a generic_context is set, since that means AOT can't
1408 * look up the method using just the image+token.
1409 * table == 0 means this is a reference made from a wrapper.
1411 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1412 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1413 jump_info_token->image = image;
1414 jump_info_token->token = token;
1415 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1420 * This function is called to handle items that are left on the evaluation stack
1421 * at basic block boundaries. What happens is that we save the values to local variables
1422 * and we reload them later when first entering the target basic block (with the
1423 * handle_loaded_temps () function).
1424 * A single join point will use the same variables (stored in the array bb->out_stack or
1425 * bb->in_stack, if the basic block is before or after the join point).
1427 * This function needs to be called _before_ emitting the last instruction of
1428 * the bb (i.e. before emitting a branch).
1429 * If the stack merge fails at a join point, cfg->unverifiable is set.
1432 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1435 MonoBasicBlock *bb = cfg->cbb;
1436 MonoBasicBlock *outb;
1437 MonoInst *inst, **locals;
1442 if (cfg->verbose_level > 3)
1443 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First call for this bblock: decide which variables carry the stack. */
1444 if (!bb->out_scount) {
1445 bb->out_scount = count;
1446 //printf ("bblock %d has out:", bb->block_num);
/* If a successor already has an in_stack, reuse it as our out_stack. */
1448 for (i = 0; i < bb->out_count; ++i) {
1449 outb = bb->out_bb [i];
1450 /* exception handlers are linked, but they should not be considered for stack args */
1451 if (outb->flags & BB_EXCEPTION_HANDLER)
1453 //printf (" %d", outb->block_num);
1454 if (outb->in_stack) {
1456 bb->out_stack = outb->in_stack;
/* No successor provided one: allocate fresh out_stack variables. */
1462 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1463 for (i = 0; i < count; ++i) {
1465 * try to reuse temps already allocated for this purpouse, if they occupy the same
1466 * stack slot and if they are of the same type.
1467 * This won't cause conflicts since if 'local' is used to
1468 * store one of the values in the in_stack of a bblock, then
1469 * the same variable will be used for the same outgoing stack
1471 * This doesn't work when inlining methods, since the bblocks
1472 * in the inlined methods do not inherit their in_stack from
1473 * the bblock they are inlined to. See bug #58863 for an
1476 if (cfg->inlined_method)
1477 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1479 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor that has no in_stack yet;
 * a depth mismatch at a join point makes the method unverifiable. */
1484 for (i = 0; i < bb->out_count; ++i) {
1485 outb = bb->out_bb [i];
1486 /* exception handlers are linked, but they should not be considered for stack args */
1487 if (outb->flags & BB_EXCEPTION_HANDLER)
1489 if (outb->in_scount) {
1490 if (outb->in_scount != bb->out_scount) {
1491 cfg->unverifiable = TRUE;
1494 continue; /* check they are the same locals */
1496 outb->in_scount = count;
1497 outb->in_stack = bb->out_stack;
1500 locals = bb->out_stack;
/* Spill the current stack values into the chosen locals. */
1502 for (i = 0; i < count; ++i) {
1503 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1504 inst->cil_code = sp [i]->cil_code;
1505 sp [i] = locals [i];
1506 if (cfg->verbose_level > 3)
1507 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1511 * It is possible that the out bblocks already have in_stack assigned, and
1512 * the in_stacks differ. In this case, we will store to all the different
1519 /* Find a bblock which has a different in_stack */
1521 while (bindex < bb->out_count) {
1522 outb = bb->out_bb [bindex];
1523 /* exception handlers are linked, but they should not be considered for stack args */
1524 if (outb->flags & BB_EXCEPTION_HANDLER) {
1528 if (outb->in_stack != locals) {
1529 for (i = 0; i < count; ++i) {
1530 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1531 inst->cil_code = sp [i]->cil_code;
1532 sp [i] = locals [i];
1533 if (cfg->verbose_level > 3)
1534 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1536 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Load a runtime constant described by (patch_type, data). Under AOT this
 * emits an AOTCONST to be patched at load time; under JIT the patch is
 * resolved immediately and emitted as a plain pointer constant.
 */
1546 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1550 if (cfg->compile_aot) {
1551 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1557 ji.type = patch_type;
1558 ji.data.target = data;
1559 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1560 mono_error_assert_ok (&error);
1562 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that loads into "intf_bit_reg" a nonzero value iff the interface
 * bitmap found at base_reg+offset has the bit for KLASS's interface id set.
 * Three strategies: a runtime helper call when the bitmap is compressed,
 * computed bit extraction when the IID is only known at load time (AOT),
 * or a direct byte load + mask when the IID is a compile-time constant.
 */
1568 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1570 int ibitmap_reg = alloc_preg (cfg);
1571 #ifdef COMPRESSED_INTERFACE_BITMAP
1573 MonoInst *res, *ins;
1574 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1575 MONO_ADD_INS (cfg->cbb, ins);
1577 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1578 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1581 int ibitmap_byte_reg = alloc_preg (cfg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1585 if (cfg->compile_aot) {
/* IID is not a compile-time constant under AOT: compute byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) in registers. */
1586 int iid_reg = alloc_preg (cfg);
1587 int shifted_iid_reg = alloc_preg (cfg);
1588 int ibitmap_byte_address_reg = alloc_preg (cfg);
1589 int masked_iid_reg = alloc_preg (cfg);
1590 int iid_one_bit_reg = alloc_preg (cfg);
1591 int iid_bit_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1597 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is known now, so fold index and mask into immediates. */
1601 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1608 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1609 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the interface bitmap lives at MonoClass.interface_bitmap. */
1612 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1614 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1618 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1619 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: same check, but the bitmap is read off the MonoVTable. */
1622 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1624 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1628 * Emit code which checks whether the interface id of @klass is smaller
1629 * than the value given by max_iid_reg.
/* On failure either branches to false_target (isinst-style) or throws
 * InvalidCastException (castclass-style). Under AOT the IID is loaded
 * through a patch; under JIT it is compared as an immediate. */
1632 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 if (cfg->compile_aot) {
1636 int iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1648 /* Same as above, but obtains max_iid from a vtable */
1650 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1651 MonoBasicBlock *false_target)
1653 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1656 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1659 /* Same as above, but obtains max_iid from a klass */
1661 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1662 MonoBasicBlock *false_target)
1664 int max_iid_reg = alloc_preg (cfg);
/* same check as the vtable variant, reading MonoClass.max_interface_id */
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1667 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass check using the supertypes table of the
 * class in klass_reg: load supertypes[idepth-1] and compare it against
 * KLASS (as klass_ins->dreg when a runtime class instance is supplied, as
 * an AOT class constant, or as an immediate). Branches to true_target on
 * match; a too-shallow idepth branches to false_target.
 */
1671 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1673 int idepth_reg = alloc_preg (cfg);
1674 int stypes_reg = alloc_preg (cfg);
1675 int stype = alloc_preg (cfg);
1677 mono_class_setup_supertypes (klass);
/* The fast path only works if the supertypes table is guaranteed long
 * enough; otherwise check idepth at runtime first. */
1679 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1687 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1688 } else if (cfg->compile_aot) {
1689 int const_reg = alloc_preg (cfg);
1690 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check against a compile-time KLASS only. */
1699 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1701 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Check that the vtable in vtable_reg implements interface KLASS:
 * first the max-iid bound, then the interface bitmap bit. With a
 * true_target this behaves like isinst (branch on success); without one
 * it behaves like castclass (throw InvalidCastException on failure).
 */
1705 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_reg = alloc_preg (cfg);
1709 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1719 * Variant of the above that takes a register to the class, not the vtable.
1722 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1724 int intf_bit_reg = alloc_preg (cfg);
1726 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1727 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* branch on success when isinst-style, otherwise throw on failure */
1730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1732 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class equality check: compare klass_reg against either a
 * runtime class instance (klass_inst) or a class constant, throwing
 * InvalidCastException on mismatch.
 */
1736 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1744 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time KLASS. */
1748 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1750 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare klass_reg against KLASS (class constant under AOT, immediate
 * under JIT) and branch to TARGET using the given branch opcode.
 */
1754 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1756 if (cfg->compile_aot) {
1757 int const_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1767 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in klass_reg against KLASS.
 * The array branch checks rank and element class (with special handling
 * for object/enum element types, and an extra "is a vector" check for
 * rank-1 SZARRAYs); the non-array branch walks the supertypes table like
 * mini_emit_isninst_cast_inst but throws instead of branching.
 * obj_reg may be -1 to skip the vector check (arrays of arrays).
 */
1770 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1773 int rank_reg = alloc_preg (cfg);
1774 int eclass_reg = alloc_preg (cfg);
1776 g_assert (!klass_inst);
/* rank must match exactly */
1777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1780 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1782 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts enum-typed elements: accept anything whose
 * parent is not Enum's parent, else require the element be an enum */
1783 int parent_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1785 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1786 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1787 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1788 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1789 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1790 } else if (klass->cast_class == mono_defaults.enum_class) {
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1793 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1795 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1796 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1799 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1800 /* Check that the object is a vector too */
1801 int bounds_reg = alloc_preg (cfg);
1802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1804 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: supertypes-table walk, throwing on mismatch */
1807 int idepth_reg = alloc_preg (cfg);
1808 int stypes_reg = alloc_preg (cfg);
1809 int stype = alloc_preg (cfg);
1811 mono_class_setup_supertypes (klass);
1813 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1816 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1820 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without a runtime class-instance argument. */
1825 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1827 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that sets SIZE bytes at destreg+offset to VAL (only
 * val == 0 is supported, asserted below). Small aligned sizes use a single
 * immediate store; otherwise a constant register is stored in the widest
 * aligned chunks available, narrowing to byte stores for the tail.
 */
1831 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1835 g_assert (val == 0);
1840 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1849 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1851 #if SIZEOF_REGISTER == 8
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize val in a register and store in chunks */
1859 val_reg = alloc_preg (cfg);
1861 if (SIZEOF_REGISTER == 8)
1862 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1864 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until aligned */
1867 /* This could be optimized further if neccesary */
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1876 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR that copies SIZE bytes from srcreg+soffset to
 * destreg+doffset, using the widest aligned load/store pairs available
 * (8-byte when the backend allows, else 4/2/1-byte) with byte copies for
 * unaligned prefixes and the tail. Size is capped to bound code expansion.
 */
1907 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1914 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1915 g_assert (size < 10000);
/* unaligned prefix: byte-wise copy until aligned */
1918 /* This could be optimized further if neccesary */
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1929 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores sreg1 into the TLS slot identified by tls_key.
 * Under AOT the TLS offset is loaded through a patchable constant
 * (OP_TLS_SET_REG); under JIT the offset is baked in (OP_TLS_SET).
 */
1967 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1971 if (cfg->compile_aot) {
1972 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1973 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1975 ins->sreg2 = c->dreg;
1976 MONO_ADD_INS (cfg->cbb, ins);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1980 ins->inst_offset = mini_get_tls_offset (tls_key);
1981 MONO_ADD_INS (cfg->cbb, ins);
1988 * Emit IR to push the current LMF onto the LMF stack.
1991 emit_push_lmf (MonoCompile *cfg)
1994 * Emit IR to push the LMF:
1995 * lmf_addr = <lmf_addr from tls>
1996 * lmf->lmf_addr = lmf_addr
1997 * lmf->prev_lmf = *lmf_addr
2000 int lmf_reg, prev_lmf_reg;
2001 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link by swapping TLS slots. */
2006 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2007 /* Load current lmf */
2008 lmf_ins = mono_get_lmf_intrinsic (cfg);
2010 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2012 lmf_reg = ins->dreg;
2013 /* Save previous_lmf */
2014 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2016 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2019 * Store lmf_addr in a variable, so it can be allocated to a global register.
2021 if (!cfg->lmf_addr_var)
2022 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr via the jit_tls intrinsic when available... */
2025 ins = mono_get_jit_tls_intrinsic (cfg);
2027 int jit_tls_dreg = ins->dreg;
2029 MONO_ADD_INS (cfg->cbb, ins);
2030 lmf_reg = alloc_preg (cfg);
2031 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2033 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
/* ...or the dedicated lmf_addr intrinsic... */
2036 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2038 MONO_ADD_INS (cfg->cbb, lmf_ins);
2041 MonoInst *args [16], *jit_tls_ins, *ins;
2043 /* Inline mono_get_lmf_addr () */
2044 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2046 /* Load mono_jit_tls_id */
2047 if (cfg->compile_aot)
2048 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2050 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2051 /* call pthread_getspecific () */
2052 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2053 /* lmf_addr = &jit_tls->lmf */
2054 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...falling back to the mono_get_lmf_addr icall. */
2057 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2061 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2063 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2064 lmf_reg = ins->dreg;
2066 prev_lmf_reg = alloc_preg (cfg);
2067 /* Save previous_lmf */
2068 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2069 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf: make this frame's LMF the current one */
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2078 * Emit IR to pop the current LMF from the LMF stack.
2081 emit_pop_lmf (MonoCompile *cfg)
2083 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2089 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2090 lmf_reg = ins->dreg;
/* TLS fast path: restore the saved previous_lmf straight into TLS. */
2092 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2093 /* Load previous_lmf */
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2100 * Emit IR to pop the LMF:
2101 * *(lmf->lmf_addr) = lmf->prev_lmf
2103 /* This could be called before emit_push_lmf () */
2104 if (!cfg->lmf_addr_var)
2105 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2106 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2108 prev_lmf_reg = alloc_preg (cfg);
2109 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2110 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a call to the profiler enter/leave callback FUNC with the current
 * method as argument, but only for the outermost method (not inlinees)
 * and only when enter/leave profiling is enabled.
 */
2115 emit_instrumentation_call (MonoCompile *cfg, void *func)
2117 MonoInst *iargs [1];
2120 * Avoid instrumenting inlined methods since it can
2121 * distort profiling results.
2123 if (cfg->method != cfg->current_method)
2126 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2127 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2128 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the IR call opcode for a call returning TYPE, in one of three
 * flavors: indirect (calli -> *_REG), virtual (virt -> *_MEMBASE) or
 * direct. Enums and generic insts are resolved to their underlying type
 * and re-dispatched. Unknown types are a fatal error.
 */
2133 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2136 type = mini_get_underlying_type (type);
2137 switch (type->type) {
2138 case MONO_TYPE_VOID:
2139 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2146 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2150 case MONO_TYPE_FNPTR:
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_CLASS:
2153 case MONO_TYPE_STRING:
2154 case MONO_TYPE_OBJECT:
2155 case MONO_TYPE_SZARRAY:
2156 case MONO_TYPE_ARRAY:
2157 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2163 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2165 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2168 case MONO_TYPE_VALUETYPE:
2169 if (type->data.klass->enumtype) {
/* enum return: dispatch again on the underlying integral type */
2170 type = mono_class_enum_basetype (type->data.klass);
2173 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2174 case MONO_TYPE_TYPEDBYREF:
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_GENERICINST:
2177 type = &type->data.generic_class->container_class->byval_arg;
2180 case MONO_TYPE_MVAR:
/* gsharedvt scenario: type variables are treated as vtypes here */
2182 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2190 * target_type_is_incompatible:
2191 * @cfg: MonoCompile context
2193 * Check that the item @arg on the evaluation stack can be stored
2194 * in the target type (can be a local, or field, etc).
2195 * The cfg arg can be used to check if we need verification or just
2198 * Returns: non-0 value if arg can't be stored on a target.
2201 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2203 MonoType *simple_type;
2206 if (target->byref) {
2207 /* FIXME: check that the pointed to types match */
2208 if (arg->type == STACK_MP) {
2209 MonoClass *base_class = mono_class_from_mono_type (target);
2210 /* This is needed to handle gshared types + ldaddr */
2211 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2212 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2214 if (arg->type == STACK_PTR)
/* non-byref target: compare against the stack category of the type */
2219 simple_type = mini_get_underlying_type (target);
2220 switch (simple_type->type) {
2221 case MONO_TYPE_VOID:
2229 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2233 /* STACK_MP is needed when setting pinned locals */
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2239 case MONO_TYPE_FNPTR:
2241 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2242 * in native int. (#688008).
2244 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_CLASS:
2248 case MONO_TYPE_STRING:
2249 case MONO_TYPE_OBJECT:
2250 case MONO_TYPE_SZARRAY:
2251 case MONO_TYPE_ARRAY:
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
2258 if (arg->type != STACK_I8)
2262 if (arg->type != cfg->r4_stack_type)
2266 if (arg->type != STACK_R8)
2269 case MONO_TYPE_VALUETYPE:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_TYPEDBYREF:
2277 if (arg->type != STACK_VTYPE)
2279 klass = mono_class_from_mono_type (simple_type);
2280 if (klass != arg->klass)
2283 case MONO_TYPE_GENERICINST:
2284 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2285 MonoClass *target_class;
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 target_class = mono_class_from_mono_type (target);
2290 /* The second cases is needed when doing partial sharing */
2291 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
/* reference-type generic inst */
2295 if (arg->type != STACK_OBJ)
2297 /* FIXME: check type compatibility */
2301 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2302 g_assert (cfg->gshared);
2303 if (mini_type_var_is_vt (simple_type)) {
2304 if (arg->type != STACK_VTYPE)
2307 if (arg->type != STACK_OBJ)
2312 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2318 * Prepare arguments for passing to a function call.
2319 * Return a non-zero value if the arguments can't be passed to the given
2321 * The type checks are not yet complete and some conversions may need
2322 * casts on 32 or 64 bit architectures.
2324 * FIXME: implement this using target_type_is_incompatible ()
2327 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2329 MonoType *simple_type;
/* implicit 'this' must be an object, managed pointer or native pointer */
2333 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2337 for (i = 0; i < sig->param_count; ++i) {
2338 if (sig->params [i]->byref) {
2339 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2343 simple_type = mini_get_underlying_type (sig->params [i]);
2345 switch (simple_type->type) {
2346 case MONO_TYPE_VOID:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2361 case MONO_TYPE_FNPTR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_STRING:
2367 case MONO_TYPE_OBJECT:
2368 case MONO_TYPE_SZARRAY:
2369 case MONO_TYPE_ARRAY:
2370 if (args [i]->type != STACK_OBJ)
2375 if (args [i]->type != STACK_I8)
2379 if (args [i]->type != cfg->r4_stack_type)
2383 if (args [i]->type != STACK_R8)
2386 case MONO_TYPE_VALUETYPE:
2387 if (simple_type->data.klass->enumtype) {
/* enum parameter: re-check against its underlying integral type */
2388 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2391 if (args [i]->type != STACK_VTYPE)
2394 case MONO_TYPE_TYPEDBYREF:
2395 if (args [i]->type != STACK_VTYPE)
2398 case MONO_TYPE_GENERICINST:
2399 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2402 case MONO_TYPE_MVAR:
2404 if (args [i]->type != STACK_VTYPE)
2408 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Unknown opcodes assert.
 */
2416 callvirt_to_call (int opcode)
2419 case OP_CALL_MEMBASE:
2421 case OP_VOIDCALL_MEMBASE:
2423 case OP_FCALL_MEMBASE:
2425 case OP_RCALL_MEMBASE:
2427 case OP_VCALL_MEMBASE:
2429 case OP_LCALL_MEMBASE:
2432 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map a *_MEMBASE (virtual) call opcode to its indirect (*_REG)
 * counterpart. Unknown opcodes assert.
 */
2439 callvirt_to_call_reg (int opcode)
2442 case OP_CALL_MEMBASE:
2444 case OP_VOIDCALL_MEMBASE:
2445 return OP_VOIDCALL_REG;
2446 case OP_FCALL_MEMBASE:
2447 return OP_FCALL_REG;
2448 case OP_RCALL_MEMBASE:
2449 return OP_RCALL_REG;
2450 case OP_VCALL_MEMBASE:
2451 return OP_VCALL_REG;
2452 case OP_LCALL_MEMBASE:
2453 return OP_LCALL_REG;
2455 g_assert_not_reached ();
2461 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument for an interface call: either copy the
 * supplied imt_arg or load METHOD as a runtime constant, then hand it to
 * the call. LLVM records the vreg in call->imt_arg_reg; the JIT backends
 * pin it to MONO_ARCH_IMT_REG.
 */
2463 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2467 if (COMPILE_LLVM (cfg)) {
2469 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2472 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2473 method_reg = ins->dreg;
2477 call->imt_arg_reg = method_reg;
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same logic, register pinned to the arch IMT register */
2484 method_reg = alloc_preg (cfg);
2485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2487 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2488 method_reg = ins->dreg;
2491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2494 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP and fill in its type/ip/target.
 *   Ownership: the mempool owns the result; callers must not free it.
 */
2495 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2497 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2501 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Thin JIT-side wrapper over mono_class_check_context_used () — returns
 *   which parts of the generic context KLASS uses (class/method type args).
 */
2507 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2510 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Thin JIT-side wrapper over mono_method_check_context_used () — returns
 *   which parts of the generic context METHOD uses.
 */
2516 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2519 return mono_method_check_context_used (method);
2525 * check_method_sharing:
2527 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs are written through OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may
 * be NULL).  A vtable is passed for shared static/valuetype methods of
 * generic classes; an mrgctx is passed when the method itself has a
 * method-level generic context (mutually exclusive with the vtable case).
 */
2530 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2532 gboolean pass_vtable = FALSE;
2533 gboolean pass_mrgctx = FALSE;
2535 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2536 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2537 gboolean sharable = FALSE;
2539 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
/* Method-level generic context: mrgctx case; must not also pass a vtable. */
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2560 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2565 if (out_pass_vtable)
2566 *out_pass_vtable = pass_vtable;
2567 if (out_pass_mrgctx)
2568 *out_pass_mrgctx = pass_mrgctx;
2571 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *   Build (but do not ADD_INS) a MonoCallInst for SIG/ARGS and let the
 *   backend lower the out-args.  CALLI selects an indirect call opcode,
 *   VIRTUAL_ a *CALL_MEMBASE one, TAIL emits OP_TAILCALL (preceded by the
 *   method-leave instrumentation call).  RGCTX / UNBOX_TRAMPOLINE are
 *   recorded on the call for later stages.  The caller is responsible for
 *   MONO_ADD_INS and for setting sreg1/method as appropriate.
 */
2572 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2573 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2577 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2585 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2587 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2589 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2592 call->signature = sig;
2593 call->rgctx_reg = rgctx;
2594 sig_ret = mini_get_underlying_type (sig->ret);
2596 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/*
 * Valuetype return handling: either reuse cfg->vret_addr, or create a
 * temporary and an OP_OUTARG_VTRETADDR pointing at it (see comment below).
 */
2599 if (mini_type_is_vtype (sig_ret)) {
2600 call->vret_var = cfg->vret_addr;
2601 //g_assert_not_reached ();
2603 } else if (mini_type_is_vtype (sig_ret)) {
2604 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2607 temp->backend.is_pinvoke = sig->pinvoke;
2610 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2611 * address of return value to increase optimization opportunities.
2612 * Before vtype decomposition, the dreg of the call ins itself represents the
2613 * fact the call modifies the return value. After decomposition, the call will
2614 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2615 * will be transformed into an LDADDR.
2617 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2618 loada->dreg = alloc_preg (cfg);
2619 loada->inst_p0 = temp;
2620 /* We reference the call too since call->dreg could change during optimization */
2621 loada->inst_p1 = call;
2622 MONO_ADD_INS (cfg->cbb, loada);
2624 call->inst.dreg = temp->dreg;
2626 call->vret_var = loada;
2627 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2628 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2630 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2631 if (COMPILE_SOFT_FLOAT (cfg)) {
2633 * If the call has a float argument, we would need to do an r8->r4 conversion using
2634 * an icall, but that cannot be done during the call sequence since it would clobber
2635 * the call registers + the stack. So we do it before emitting the call.
2637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2639 MonoInst *in = call->args [i];
2641 if (i >= sig->hasthis)
2642 t = sig->params [i - sig->hasthis];
2644 t = &mono_defaults.int_class->byval_arg;
2645 t = mono_type_get_underlying_type (t);
2647 if (!t->byref && t->type == MONO_TYPE_R4) {
2648 MonoInst *iargs [1];
2652 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2654 /* The result will be in an int vreg */
2655 call->args [i] = conv;
2661 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the out-args: LLVM or native backend, depending on the build. */
2664 if (COMPILE_LLVM (cfg))
2665 mono_llvm_emit_call (cfg, call);
2667 mono_arch_emit_call (cfg, call);
2669 mono_arch_emit_call (cfg, call);
2672 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2673 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument (already in RGCTX_REG) to CALL: pin it to
 *   MONO_ARCH_RGCTX_REG, mark the cfg/call as using the rgctx register,
 *   and record the vreg in call->rgctx_arg_reg for later passes.
 */
2679 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2682 cfg->uses_rgctx_reg = TRUE;
2683 call->rgctx_reg = TRUE;
2685 call->rgctx_arg_reg = rgctx_reg;
2689 inline static MonoInst*
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG.  Optionally
 *   passes IMT_ARG and RGCTX_ARG.  When check_pinvoke_callconv is enabled
 *   and this is a managed-to-native pinvoke wrapper, brackets the call with
 *   OP_GET_SP / OP_SET_SP to detect (and survive) callee stack imbalance,
 *   raising ExecutionEngineException on mismatch.
 */
2690 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2695 gboolean check_sp = FALSE;
2697 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2698 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2700 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Snapshot the rgctx value before arg lowering can clobber it. */
2705 rgctx_reg = mono_alloc_preg (cfg);
2706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2710 if (!cfg->stack_inbalance_var)
2711 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record the stack pointer before the call. */
2713 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 ins->dreg = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2718 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2720 call->inst.sreg1 = addr->dreg;
2723 emit_imt_argument (cfg, call, NULL, imt_arg);
2725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* After the call: compare current SP against the saved one. */
2730 sp_reg = mono_alloc_preg (cfg);
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 MONO_ADD_INS (cfg->cbb, ins);
2736 /* Restore the stack so we don't crash when throwing the exception */
2737 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2738 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2739 MONO_ADD_INS (cfg->cbb, ins);
2741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2742 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2746 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2748 return (MonoInst*)call;
2752 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2755 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2757 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) call to METHOD.  Handles, in
 *   order: string ctor signature rewriting, transparent-proxy/remoting
 *   dispatch, llvm-only virtual dispatch, delegate Invoke fast path,
 *   devirtualization of non-virtual/final methods, and finally vtable/IMT
 *   slot dispatch.  THIS_INS non-NULL selects virtual dispatch; IMT_ARG and
 *   RGCTX_ARG are forwarded when set.
 */
2760 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2761 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2763 #ifndef DISABLE_REMOTING
2764 gboolean might_be_remote = FALSE;
2766 gboolean virtual_ = this_ins != NULL;
2767 gboolean enable_for_aot = TRUE;
2770 MonoInst *call_target = NULL;
2772 gboolean need_unbox_trampoline;
2775 sig = mono_method_signature (method);
2777 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2778 g_assert_not_reached ();
/* Preserve the rgctx value in its own vreg before emitting anything else. */
2781 rgctx_reg = mono_alloc_preg (cfg);
2782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2785 if (method->string_ctor) {
2786 /* Create the real signature */
2787 /* FIXME: Cache these */
2788 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2789 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2794 context_used = mini_method_check_context_used (cfg, method);
2796 #ifndef DISABLE_REMOTING
2797 might_be_remote = this_ins && sig->hasthis &&
2798 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2799 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2801 if (might_be_remote && context_used) {
2804 g_assert (cfg->gshared);
2806 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2808 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2812 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2813 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this_ins;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this_ins->dreg;
/* Fast path: delegate Invoke calls go through delegate->invoke_impl. */
2835 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
/* Devirtualization: non-virtual, or final (sealed) target. */
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2894 } else if (call_target) {
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2898 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2899 call->inst.sreg1 = call_target->dreg;
/*
 * BUG(review): '!' is logical not, so !MONO_INST_HAS_METHOD == 0 and this
 * clears ALL flags, not just MONO_INST_HAS_METHOD.  Should be
 * 'call->inst.flags &= ~MONO_INST_HAS_METHOD;'.  Not fixable here because
 * surrounding lines are missing from this extract.
 */
2900 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* Generic vtable / IMT dispatch path. */
2902 vtable_reg = alloc_preg (cfg);
2903 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2904 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 guint32 imt_slot = mono_method_get_imt_slot (method);
2906 emit_imt_argument (cfg, call, call->method, imt_arg);
2907 slot_reg = vtable_reg;
/* IMT table sits immediately *before* the vtable, hence the negative offset. */
2908 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2910 slot_reg = vtable_reg;
2911 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2912 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2914 g_assert (mono_method_signature (method)->generic_param_count);
2915 emit_imt_argument (cfg, call, call->method, imt_arg);
2919 call->inst.sreg1 = slot_reg;
2920 call->inst.inst_offset = offset;
2921 call->is_virtual = TRUE;
2925 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2928 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2930 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature and
 *   no imt/rgctx arguments.
 */
2934 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2936 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call (between 2947 and
 *   2950) is not visible in this extract.
 */
2940 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2947 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2950 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2952 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the JIT icall registered for FUNC and emit a call to its
 *   exception-safe wrapper.
 */
2956 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2958 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2962 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2966 * mono_emit_abs_call:
2968 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2970 inline static MonoInst*
2971 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2972 MonoMethodSignature *sig, MonoInst **args)
/* The MonoJumpInfo itself is passed as the "address"; see comment below. */
2974 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2978 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2981 if (cfg->abs_patches == NULL)
2982 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2983 g_hash_table_insert (cfg->abs_patches, ji, ji);
2984 ins = mono_emit_native_call (cfg, ji, sig, args);
2985 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2989 static MonoMethodSignature*
/*
 * sig_to_rgctx_sig:
 *   Duplicate SIG with one extra trailing native-int parameter (used for
 *   the rgctx/extra argument).  Ownership: result is g_malloc'ed and leaks
 *   by design (see FIXME); params array aliases SIG's MonoType pointers.
 */
2990 sig_to_rgctx_sig (MonoMethodSignature *sig)
2992 // FIXME: memory allocation
2993 MonoMethodSignature *res;
2996 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2997 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2998 res->param_count = sig->param_count + 1;
2999 for (i = 0; i < sig->param_count; ++i)
3000 res->params [i] = sig->params [i];
3001 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3005 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Repackage ORIG_ARGS (this + params) into a new argument array, append
 *   the value in ARG_REG as an extra trailing argument, and emit an
 *   indirect call to CALL_TARGET with the widened signature from
 *   sig_to_rgctx_sig ().  Uses a stack buffer for small arg counts.
 */
3007 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3009 MonoMethodSignature *csig;
3010 MonoInst *args_buf [16];
3012 int i, pindex, tmp_reg;
3014 /* Make a call with an rgctx/extra arg */
3015 if (fsig->param_count + 2 < 16)
3018 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3021 args [pindex ++] = orig_args [0];
3022 for (i = 0; i < fsig->param_count; ++i)
3023 args [pindex ++] = orig_args [fsig->hasthis + i];
3024 tmp_reg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3026 csig = sig_to_rgctx_sig (fsig);
3027 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3030 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *   llvm-only mode uses <addr, arg> function descriptors: load the code
 *   address and the extra argument from the descriptor, then call through
 *   emit_extra_arg_calli ().
 */
3032 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3034 int addr_reg, arg_reg;
3035 MonoInst *call_target;
3037 g_assert (cfg->llvm_only);
3040 * addr points to a <addr, arg> pair, load both of them, and
3041 * make a call to addr, passing arg as an extra arg.
3043 addr_reg = alloc_preg (cfg);
3044 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3045 arg_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3048 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (no wrapper).  Disabled under
 *   LLVM on some targets, when emitting seq points for the debugger, or
 *   when explicitly turned off on the cfg.
 */
3052 direct_icalls_enabled (MonoCompile *cfg)
3054 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3056 if (cfg->compile_llvm)
3059 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot
 *   raise and direct icalls are enabled, inline its wrapper (a C call plus
 *   exception check) instead of calling through the wrapper; otherwise fall
 *   back to a call via mono_icall_get_wrapper ().
 */
3065 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3068 * Call the jit icall without a wrapper if possible.
3069 * The wrapper is needed for the following reasons:
3070 * - to handle exceptions thrown using mono_raise_exceptions () from the
3071 * icall function. The EH code needs the lmf frame pushed by the
3072 * wrapper to be able to unwind back to managed code.
3073 * - to be able to do stack walks for asynchronously suspended
3074 * threads when debugging.
3076 if (info->no_raise && direct_icalls_enabled (cfg)) {
3080 if (!info->wrapper_method) {
/* Lazily create and publish the wrapper; barrier orders the store. */
3081 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3082 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3084 mono_memory_barrier ();
3088 * Inline the wrapper method, which is basically a call to the C icall, and
3089 * an exception check.
3091 costs = inline_method (cfg, info->wrapper_method, NULL,
3092 args, NULL, cfg->real_offset, TRUE);
3093 g_assert (costs > 0);
3094 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   For pinvoke (or LLVM) calls returning small integers, emit an explicit
 *   sign/zero extension of the call result, since native code may leave the
 *   upper bits of the register uninitialized.  Returns the (possibly new)
 *   result instruction.
 */
3103 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3105 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3106 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3110 * Native code might return non register sized integers
3111 * without initializing the upper bits.
3113 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3114 case OP_LOADI1_MEMBASE:
3115 widen_op = OP_ICONV_TO_I1;
3117 case OP_LOADU1_MEMBASE:
3118 widen_op = OP_ICONV_TO_U1;
3120 case OP_LOADI2_MEMBASE:
3121 widen_op = OP_ICONV_TO_I2;
3123 case OP_LOADU2_MEMBASE:
3124 widen_op = OP_ICONV_TO_U2;
3130 if (widen_op != -1) {
3131 int dreg = alloc_preg (cfg);
3134 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3135 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (lazily caching) the managed String.memcpy (3-arg) helper from
 *   corlib; aborts if the running corlib does not provide it.
 */
3145 get_memcpy_method (void)
3147 static MonoMethod *memcpy_method = NULL;
3148 if (!memcpy_method) {
3149 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively OR into *WB_BITMAP one bit per pointer-sized slot of KLASS
 *   (at byte OFFSET from the start) that holds a managed reference.  Static
 *   fields are skipped; nested value types with references recurse.
 */
3157 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3159 MonoClassField *field;
3160 gpointer iter = NULL;
3162 while ((field = mono_class_get_fields (klass, &iter))) {
3165 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) object header. */
3167 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3168 if (mini_type_is_reference (mono_field_get_type (field))) {
3169 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3170 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3172 MonoClass *field_class = mono_class_from_mono_type (field->type);
3173 if (field_class->has_references)
3174 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for a reference store of VALUE through PTR.
 *   Three strategies, in order of preference: a backend OP_CARD_TABLE_WBARRIER,
 *   an inline card-table mark (shift, mask, store 1 into the card byte), or
 *   a call to the runtime's generic write-barrier method.  No-op unless
 *   cfg->gen_write_barriers is set.
 */
3180 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3182 int card_table_shift_bits;
3183 gpointer card_table_mask;
3185 MonoInst *dummy_use;
3186 int nursery_shift_bits;
3187 size_t nursery_size;
3189 if (!cfg->gen_write_barriers)
3192 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3194 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3196 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3199 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3200 wbarrier->sreg1 = ptr->dreg;
3201 wbarrier->sreg2 = value->dreg;
3202 MONO_ADD_INS (cfg->cbb, wbarrier);
3203 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3204 int offset_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3209 if (card_table_mask)
3210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3212 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3213 * IMM's larger than 32bits.
3215 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3216 card_reg = ins->dreg;
3218 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3219 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3221 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3222 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3225 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 *   type KLASS (iargs[0]=dest, iargs[1]=src).  Pointer-sized slots are
 *   copied one by one with a barrier on reference slots (per the bitmap
 *   from create_write_barrier_bitmap); the sub-pointer tail is copied with
 *   4/2/1-byte moves.  Larger copies fall back to the
 *   mono_gc_wbarrier_value_copy_bitmap icall.  NOTE(review): the return
 *   statements are not visible in this extract — presumably returns a
 *   gboolean "handled" flag; confirm against the full source.
 */
3229 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3231 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3232 unsigned need_wb = 0;
3237 /*types with references can't have alignment smaller than sizeof(void*) */
3238 if (align < SIZEOF_VOID_P)
3241 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3242 if (size > 32 * SIZEOF_VOID_P)
3245 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3247 /* We don't unroll more than 5 stores to avoid code bloat. */
3248 if (size > 5 * SIZEOF_VOID_P) {
3249 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3250 size += (SIZEOF_VOID_P - 1);
3251 size &= ~(SIZEOF_VOID_P - 1);
3253 EMIT_NEW_ICONST (cfg, iargs [2], size);
3254 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3255 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3259 destreg = iargs [0]->dreg;
3260 srcreg = iargs [1]->dreg;
3263 dest_ptr_reg = alloc_preg (cfg);
3264 tmp_reg = alloc_preg (cfg);
/* Walk a separate dest pointer so barrier calls see the slot address. */
3267 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3269 while (size >= SIZEOF_VOID_P) {
3270 MonoInst *load_inst;
3271 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3272 load_inst->dreg = tmp_reg;
3273 load_inst->inst_basereg = srcreg;
3274 load_inst->inst_offset = offset;
3275 MONO_ADD_INS (cfg->cbb, load_inst);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3280 emit_write_barrier (cfg, iargs [0], load_inst);
3282 offset += SIZEOF_VOID_P;
3283 size -= SIZEOF_VOID_P;
3286 /*tmp += sizeof (void*)*/
3287 if (size >= SIZEOF_VOID_P) {
3288 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3289 MONO_ADD_INS (cfg->cbb, iargs [0]);
3293 /* Those cannot be references since size < sizeof (void*) */
3295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3309 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3319 * Emit code to copy a valuetype of type @klass whose address is stored in
3320 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: gsharedvt classes get runtime size/memcpy from the rgctx;
 * classes with references (and non-native copies) use a value_copy icall or
 * the write-barrier-aware unrolled copy; everything else uses inline
 * memcpy for small sizes or the managed memcpy helper.
 */
3323 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3325 MonoInst *iargs [4];
3328 MonoMethod *memcpy_method;
3329 MonoInst *size_ins = NULL;
3330 MonoInst *memcpy_ins = NULL;
3334 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3337 * This check breaks with spilled vars... need to handle it during verification anyway.
3338 * g_assert (klass && klass == src->klass && klass == dest->klass);
3341 if (mini_is_gsharedvt_klass (klass)) {
3343 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3344 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3348 n = mono_class_native_size (klass, &align);
3350 n = mono_class_value_size (klass, &align);
3352 /* if native is true there should be no references in the struct */
3353 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3354 /* Avoid barriers when storing to the stack */
3355 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3356 (dest->opcode == OP_LDADDR))) {
3362 context_used = mini_class_check_context_used (cfg, klass);
3364 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3365 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3367 } else if (context_used) {
3368 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3370 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3371 if (!cfg->compile_aot)
3372 mono_class_compute_gc_descriptor (klass);
3376 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3378 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-references path: plain memory copy. */
3383 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3384 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3385 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3390 iargs [2] = size_ins;
3392 EMIT_NEW_ICONST (cfg, iargs [2], n);
3394 memcpy_method = get_memcpy_method ();
3396 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3398 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (lazily caching) the managed String.memset (3-arg) helper from
 *   corlib; aborts if the running corlib does not provide it.
 */
3403 get_memset_method (void)
3405 static MonoMethod *memset_method = NULL;
3406 if (!memset_method) {
3407 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3409 g_error ("Old corlib found. Install a new one");
3411 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 *   in DEST->dreg (CIL initobj).  gsharedvt classes call a runtime bzero
 *   helper with a runtime-computed size; otherwise small sizes get an
 *   inline memset and larger ones call the managed memset helper.
 */
3415 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3417 MonoInst *iargs [3];
3420 MonoMethod *memset_method;
3421 MonoInst *size_ins = NULL;
3422 MonoInst *bzero_ins = NULL;
3423 static MonoMethod *bzero_method;
3425 /* FIXME: Optimize this for the case when dest is an LDADDR */
3426 mono_class_init (klass);
3427 if (mini_is_gsharedvt_klass (klass)) {
3428 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3429 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3431 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3432 g_assert (bzero_method);
3434 iargs [1] = size_ins;
3435 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3439 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3441 n = mono_class_value_size (klass, &align);
3443 if (n <= sizeof (gpointer) * 8) {
3444 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3447 memset_method = get_memset_method ();
3449 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3450 EMIT_NEW_ICONST (cfg, iargs [2], n);
3451 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3458 * Emit IR to return either the this pointer for instance method,
3459 * or the mrgctx for static methods.
/*
 * The rgctx source depends on METHOD: instance methods load 'this' (arg 0);
 * generic methods (CONTEXT_USED & METHOD) load the mrgctx from the vtable
 * var; static/valuetype methods load the vtable var directly (and for
 * inflated methods, reach the vtable through the mrgctx's class_vtable);
 * otherwise the vtable is loaded from this->vtable.
 */
3462 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3464 MonoInst *this_ins = NULL;
3466 g_assert (cfg->gshared);
3468 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3469 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3470 !method->klass->valuetype)
3471 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3473 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3474 MonoInst *mrgctx_loc, *mrgctx_var;
3476 g_assert (!this_ins);
3477 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3479 mrgctx_loc = mono_get_vtable_var (cfg);
3480 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3483 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3484 MonoInst *vtable_loc, *vtable_var;
3486 g_assert (!this_ins);
3488 vtable_loc = mono_get_vtable_var (cfg);
3489 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3491 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3492 MonoInst *mrgctx_var = vtable_var;
3495 vtable_reg = alloc_preg (cfg);
3496 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3497 vtable_var->type = STACK_PTR;
/* Instance-method fallback: vtable comes from the receiver object. */
3505 vtable_reg = alloc_preg (cfg);
3506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3511 static MonoJumpInfoRgctxEntry *
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry descriptor (method, mrgctx flag, wrapped patch
 *   info, and info type) from MP.  Ownership: the mempool owns both the
 *   entry and its embedded MonoJumpInfo.
 */
3512 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3514 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3515 res->method = method;
3516 res->in_mrgctx = in_mrgctx;
3517 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3518 res->data->type = patch_type;
3519 res->data->data.target = patch_data;
3520 res->info_type = info_type;
3525 static inline MonoInst*
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fetch of rgctx slot ENTRY from RGCTX.  One path (AOT /
 *   llvm-only, presumably — the guard lines are missing from this extract)
 *   just calls mono_fill_{method,class}_rgctx with the slot index resolved
 *   at load time; the other open-codes the fast path: walk the rgctx array
 *   chain for the slot's depth, load the slot, and fall back to the same
 *   icalls through is_null_bb whenever a level or the slot is still NULL.
 */
3526 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3528 MonoInst *args [16];
3531 // FIXME: No fastpath since the slot is not a compile time constant
3533 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3534 if (entry->in_mrgctx)
3535 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3537 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3541 * FIXME: This can be called during decompose, which is a problem since it creates
3543 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3545 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3547 MonoBasicBlock *is_null_bb, *end_bb;
3548 MonoInst *res, *ins, *call;
3551 slot = mini_get_rgctx_entry_slot (entry);
3553 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3554 index = MONO_RGCTX_SLOT_INDEX (slot);
3556 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Compute the nesting depth of the slot within the growing rgctx arrays. */
3557 for (depth = 0; ; ++depth) {
3558 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3560 if (index < size - 1)
3565 NEW_BBLOCK (cfg, end_bb);
3566 NEW_BBLOCK (cfg, is_null_bb);
3569 rgctx_reg = rgctx->dreg;
3571 rgctx_reg = alloc_preg (cfg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3574 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3575 NEW_BBLOCK (cfg, is_null_bb);
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3581 for (i = 0; i < depth; ++i) {
3582 int array_reg = alloc_preg (cfg);
3584 /* load ptr to next array */
3585 if (mrgctx && i == 0)
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3589 rgctx_reg = array_reg;
3590 /* is the ptr null? */
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3592 /* if yes, jump to actual trampoline */
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3597 val_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3599 /* is the slot null? */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3601 /* if yes, jump to actual trampoline */
3602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path: slot already populated — move it into the result reg. */
3605 res_reg = alloc_preg (cfg);
3606 MONO_INST_NEW (cfg, ins, OP_MOVE);
3607 ins->dreg = res_reg;
3608 ins->sreg1 = val_reg;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot via the runtime and move the result over. */
3614 MONO_START_BB (cfg, is_null_bb);
3616 EMIT_NEW_ICONST (cfg, args [1], index);
3618 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3620 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3621 MONO_INST_NEW (cfg, ins, OP_MOVE);
3622 ins->dreg = res_reg;
3623 ins->sreg1 = call->dreg;
3624 MONO_ADD_INS (cfg->cbb, ins);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3627 MONO_START_BB (cfg, end_bb);
3636 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * emit_rgctx_fetch:
 *
 *   Emit IR to fetch the rgctx entry ENTRY through RGCTX. Dispatches either to
 * the inline expansion or to an out-of-line call to the lazy-fetch trampoline
 * (MONO_PATCH_INFO_RGCTX_FETCH). NOTE(review): the guard condition between the
 * two returns is elided from this view — presumably llvm-only mode selects the
 * inline path; confirm against the full file.
 */
3639 static inline MonoInst*
3640 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3643 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3645 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for KLASS:
 * build a MONO_PATCH_INFO_CLASS rgctx entry, load the runtime generic
 * context and fetch the entry from it.
 */
3649 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3650 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Same pattern as emit_get_rgctx_klass, but for a method signature
 * (MONO_PATCH_INFO_SIGNATURE entry).
 */
3659 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3660 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3662 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3663 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch rgctx data describing a gsharedvt call: a
 * MonoJumpInfoGSharedVtCall pairing SIG with CMETHOD is allocated from the
 * cfg mempool (lifetime tied to the compilation) and fetched via a
 * MONO_PATCH_INFO_GSHAREDVT_CALL entry.
 */
3669 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3670 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3672 MonoJumpInfoGSharedVtCall *call_info;
3673 MonoJumpInfoRgctxEntry *entry;
3676 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3677 call_info->sig = sig;
3678 call_info->method = cmethod;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 * emit_get_rgctx_virt_method:
3689 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * emit_get_rgctx_virt_method:
 *
 *   Emit IR to fetch rgctx data for the virtual method VIRT_METHOD resolved
 * against a receiver of type KLASS (MONO_PATCH_INFO_VIRT_METHOD entry).
 */
3692 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3693 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3695 MonoJumpInfoVirtMethod *info;
3696 MonoJumpInfoRgctxEntry *entry;
/* The (klass, method) pair lives in the cfg mempool for the compilation */
3699 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3700 info->klass = klass;
3701 info->method = virt_method;
3703 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3704 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt info structure for CMETHOD
 * (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) from the rgctx.
 */
3710 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3711 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3713 MonoJumpInfoRgctxEntry *entry;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
3723 * emit_get_rgctx_method:
3725 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3726 * normal constants, else emit a load from the rgctx.
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If CONTEXT_USED is 0
 * (no generic sharing in effect) emit a plain compile-time constant; otherwise
 * fetch the value from the rgctx at run time.
 */
3729 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3730 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3732 if (!context_used) {
/* Non-shared case: the value is known at compile time */
3735 switch (rgctx_type) {
3736 case MONO_RGCTX_INFO_METHOD:
3737 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3739 case MONO_RGCTX_INFO_METHOD_RGCTX:
3740 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other rgctx_type is unsupported for the constant path */
3743 g_assert_not_reached ();
/* Shared case: go through the rgctx fetch machinery */
3746 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3747 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for FIELD
 * (MONO_PATCH_INFO_FIELD entry).
 */
3754 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3755 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3757 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3758 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3760 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt info template for (DATA, RGCTX_TYPE) in
 * cfg->gsharedvt_info, registering a new entry if none matches. The entries
 * array is grown geometrically (doubling, starting at 16) from the cfg
 * mempool; old storage is simply abandoned to the mempool.
 */
3764 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3766 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3767 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing slot if one already describes the same data.
 * LOCAL_OFFSET entries are never shared — each request gets its own slot. */
3772 for (i = 0; i < info->num_entries; ++i) {
3773 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3775 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array when full */
3779 if (info->num_entries == info->count_entries) {
3780 MonoRuntimeGenericContextInfoTemplate *new_entries;
3781 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3783 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3785 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3786 info->entries = new_entries;
3787 info->count_entries = new_count_entries;
/* Append the new template and return its index */
3790 idx = info->num_entries;
3791 template_ = &info->entries [idx];
3792 template_->info_type = rgctx_type;
3793 template_->data = data;
3795 info->num_entries ++;
3801 * emit_get_gsharedvt_info:
3803 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Like emit_get_rgctx_*, but loads the value from the per-method gsharedvt
 * info variable (cfg->gsharedvt_info_var) instead of going through an rgctx
 * fetch trampoline: reserve a slot for (DATA, RGCTX_TYPE), then emit a load
 * of entries [idx] from the runtime info structure.
 */
3806 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3811 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3812 /* Load info->entries [idx] */
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3820 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3822 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3826 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR to run the class initializer (cctor) of KLASS if it has not run
 * yet. The vtable is obtained either from the rgctx (shared code) or as a
 * compile-time constant. On return the caller must check @klass for load
 * errors.
 */
3829 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3831 MonoInst *vtable_arg;
3834 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx */
3837 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3838 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: embed the vtable as a constant */
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3844 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3847 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3851 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3852 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3854 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3855 ins->sreg1 = vtable_arg->dreg;
3856 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: open-coded initialized-bit test + icall slow path */
3858 static int byte_offset = -1;
3859 static guint8 bitmask;
3860 int bits_reg, inited_reg;
3861 MonoBasicBlock *inited_bb;
3862 MonoInst *args [16];
/* Locate the 'initialized' bitfield inside MonoVTable once per process.
 * NOTE(review): the lazy init of these statics is not obviously
 * thread-synchronized here — presumably benign since the computed values
 * are identical; confirm against the full file. */
3864 if (byte_offset < 0)
3865 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3867 bits_reg = alloc_ireg (cfg);
3868 inited_reg = alloc_ireg (cfg);
/* inited_reg = vtable->initialized bit */
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3873 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the class is already initialized */
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3878 args [0] = vtable_arg;
3879 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3881 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP for METHOD, but only when
 * sequence-point generation is enabled and METHOD is the method actually
 * being compiled (not an inlinee). INTR_LOC marks the point as a valid
 * interrupt location; NONEMPTY_STACK flags that the IL stack is not empty.
 */
3886 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3890 if (cfg->gen_seq_points && cfg->method == method) {
3891 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3893 ins->flags |= MONO_INST_NONEMPTY_STACK;
3894 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When the --debug=casts option is active, emit IR that records the
 * source class (from OBJ_REG's vtable) and target KLASS of an upcoming cast
 * into the thread's MonoJitTlsData (class_cast_from/class_cast_to), so a
 * failing cast can produce a detailed message. NULL_CHECK guards the record
 * with an obj != NULL test. No-op when better_cast_details is off.
 */
3901 if (mini_get_debug_options ()->better_cast_details) {
3902 int vtable_reg = alloc_preg (cfg);
3903 int klass_reg = alloc_preg (cfg);
3904 MonoBasicBlock *is_null_bb = NULL;
3906 int to_klass_reg, context_used;
/* Skip recording entirely for a null object */
3909 NEW_BBLOCK (cfg, is_null_bb);
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Need the jit TLS block to store the details; bail out if the platform
 * has no TLS intrinsic */
3915 tls_get = mono_get_jit_tls_intrinsic (cfg);
3917 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3921 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to = KLASS, via rgctx in shared code, constant otherwise */
3927 context_used = mini_class_check_context_used (cfg, klass);
3929 MonoInst *class_ins;
3931 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3932 to_klass_reg = class_ins->dreg;
3934 to_klass_reg = alloc_preg (cfg);
3935 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3940 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: after a cast succeeded, clear the
 * recorded details in jit TLS. Clearing class_cast_from alone is sufficient
 * to mark the record as invalid. No-op when better_cast_details is off.
 */
3945 reset_cast_details (MonoCompile *cfg)
3947 /* Reset the variables holding the cast details */
3948 if (mini_get_debug_options ()->better_cast_details) {
3949 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3951 MONO_ADD_INS (cfg->cbb, tls_get);
3952 /* It is enough to reset the from field */
3953 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3958 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that verifies OBJ is exactly an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * Strategy depends on compilation mode: compare klass pointers under
 * MONO_OPT_SHARED, compare vtables via rgctx in shared-generic code, or
 * compare against a constant vtable (AOT vs JIT variants). On return the
 * caller must check @array_class for load errors.
 */
3961 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3963 int vtable_reg = alloc_preg (cfg);
3966 context_used = mini_class_check_context_used (cfg, array_class);
/* Record cast details first so a failure can be diagnosed */
3968 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj */
3970 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 int class_reg = alloc_preg (cfg);
/* Compare obj->vtable->klass against a runtime class constant */
3976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3977 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3978 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3979 } else if (context_used) {
3980 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the rgctx */
3982 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3985 if (cfg->compile_aot) {
/* AOT: the vtable goes through a patchable constant */
3989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3991 vt_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3993 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: compare directly against the vtable address immediate */
3996 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4002 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4004 reset_cast_details (cfg);
4008 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4009 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit IR to unbox VAL as Nullable<T> by calling the Nullable Unbox method
 * of KLASS. With CONTEXT_USED != 0, shared generic code is generated: the
 * method address comes from the rgctx and the call is made indirectly
 * (or via a gsharedvt calli under llvm-only). Otherwise a direct call is
 * emitted, passing the vtable when method sharing requires it.
 */
4012 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4014 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4017 MonoInst *rgctx, *addr;
4019 /* FIXME: What if the class is shared? We might not
4020 have to get the address of the method from the
4022 addr = emit_get_rgctx_method (cfg, context_used, method,
4023 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4024 if (cfg->llvm_only && cfg->gsharedvt) {
4025 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4027 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4029 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable */
4032 gboolean pass_vtable, pass_mrgctx;
4033 MonoInst *rgctx_arg = NULL;
4035 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4036 g_assert (!pass_mrgctx);
4039 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4042 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4045 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to a value of
 * type KLASS: verify the object's element class matches KLASS (throwing
 * InvalidCastException otherwise) and return an instruction computing the
 * address of the boxed payload (obj + sizeof (MonoObject)).
 */
4050 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4054 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4055 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4056 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4057 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4059 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
4060 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4063 /* FIXME: generics */
4064 g_assert (klass->rank == 0);
/* A boxed value can never be an array: rank must be 0 */
4067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4068 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: fetch the expected element class from the rgctx */
4074 MonoInst *element_class;
4076 /* This assertion is from the unboxcast insn */
4077 g_assert (klass->rank == 0);
4079 element_class = emit_get_rgctx_klass (cfg, context_used,
4080 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4082 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4083 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: compare against the known element class */
4085 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4086 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4087 reset_cast_details (cfg);
/* Result: address of the unboxed payload, right after the object header */
4090 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4091 MONO_ADD_INS (cfg->cbb, add);
4092 add->type = STACK_MP;
4099 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4101 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4102 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4106 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
4112 args [1] = klass_inst;
4115 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4117 NEW_BBLOCK (cfg, is_ref_bb);
4118 NEW_BBLOCK (cfg, is_nullable_bb);
4119 NEW_BBLOCK (cfg, end_bb);
4120 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4128 addr_reg = alloc_dreg (cfg, STACK_MP);
4132 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4133 MONO_ADD_INS (cfg->cbb, addr);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_ref_bb);
4140 /* Save the ref to a temporary */
4141 dreg = alloc_ireg (cfg);
4142 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4143 addr->dreg = addr_reg;
4144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4148 MONO_START_BB (cfg, is_nullable_bb);
4151 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4152 MonoInst *unbox_call;
4153 MonoMethodSignature *unbox_sig;
4155 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4156 unbox_sig->ret = &klass->byval_arg;
4157 unbox_sig->param_count = 1;
4158 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4161 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4163 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4165 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4166 addr->dreg = addr_reg;
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4172 MONO_START_BB (cfg, end_bb);
4175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4181 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS. FOR_BOX indicates the object is
 * being allocated for a box operation. With CONTEXT_USED != 0 shared generic
 * code is generated and the class/vtable comes from the rgctx; otherwise the
 * vtable is resolved at compile time. Prefers a GC managed allocator when one
 * is available, falling back to the generic allocation icalls.
 * Returns NULL and sets the cfg exception on error.
 */
4184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4186 MonoInst *iargs [2];
/* --- shared-generic path --- */
4191 MonoRgctxInfoType rgctx_info;
4192 MonoInst *iargs [2];
4193 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4195 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Under MONO_OPT_SHARED the icall expects a klass, otherwise a vtable */
4197 if (cfg->opt & MONO_OPT_SHARED)
4198 rgctx_info = MONO_RGCTX_INFO_KLASS;
4200 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4201 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4203 if (cfg->opt & MONO_OPT_SHARED) {
4204 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4206 alloc_ftn = ves_icall_object_new;
4209 alloc_ftn = ves_icall_object_new_specific;
4212 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4213 if (known_instance_size) {
4214 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header */
4215 if (size < sizeof (MonoObject))
4216 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4218 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4220 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4223 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4226 if (cfg->opt & MONO_OPT_SHARED) {
4227 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4228 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4230 alloc_ftn = ves_icall_object_new;
4231 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4232 /* This happens often in argument checking code, eg. throw new FooException... */
4233 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4234 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4235 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4237 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4238 MonoMethod *managed_alloc = NULL;
/* Failed vtable lookup => type load error on the cfg */
4242 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4243 cfg->exception_ptr = klass;
4247 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4249 if (managed_alloc) {
4250 int size = mono_class_instance_size (klass);
4251 if (size < sizeof (MonoObject))
4252 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4254 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4255 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4256 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Specialized allocator may want the length in words (pass_lw) */
4258 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4260 guint32 lw = vtable->klass->instance_size;
4261 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4262 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4263 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4266 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4270 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4274 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL of type KLASS, returning the boxed object. Three
 * regimes:
 *   - Nullable<T>: call the Nullable Box method (via rgctx in shared code);
 *   - gsharedvt KLASS: branch at run time on the class' box type
 *     (valuetype / reference / nullable), mirroring handle_unbox_gsharedvt;
 *   - ordinary case: allocate and store the value after the object header.
 * Returns NULL and sets the cfg exception on error.
 */
4277 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4279 MonoInst *alloc, *ins;
4281 if (mono_class_is_nullable (klass)) {
4282 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4285 if (cfg->llvm_only && cfg->gsharedvt) {
4286 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4287 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4288 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4290 /* FIXME: What if the class is shared? We might not
4291 have to get the method address from the RGCTX. */
4292 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4293 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4294 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4296 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Nullable, non-shared: direct call, optionally passing the vtable */
4299 gboolean pass_vtable, pass_mrgctx;
4300 MonoInst *rgctx_arg = NULL;
4302 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4303 g_assert (!pass_mrgctx);
4306 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4309 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4312 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4316 if (mini_is_gsharedvt_klass (klass)) {
4317 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4318 MonoInst *res, *is_ref, *src_var, *addr;
4321 dreg = alloc_ireg (cfg);
4323 NEW_BBLOCK (cfg, is_ref_bb);
4324 NEW_BBLOCK (cfg, is_nullable_bb);
4325 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of KLASS */
4326 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Valuetype case: allocate and copy the value in */
4334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4338 ins->opcode = OP_STOREV_MEMBASE;
4340 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4341 res->type = STACK_OBJ;
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: boxing a reference is the identity */
4346 MONO_START_BB (cfg, is_ref_bb);
4348 /* val is a vtype, so has to load the value manually */
4349 src_var = get_vreg_to_inst (cfg, val->dreg);
4351 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4352 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call the runtime-resolved Nullable box method */
4357 MONO_START_BB (cfg, is_nullable_bb);
4360 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4361 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4363 MonoMethodSignature *box_sig;
4366 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4367 * construct that method at JIT time, so have to do things by hand.
4369 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4370 box_sig->ret = &mono_defaults.object_class->byval_arg;
4371 box_sig->param_count = 1;
4372 box_sig->params [0] = &klass->byval_arg;
4375 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4377 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4378 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4379 res->type = STACK_OBJ;
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4385 MONO_START_BB (cfg, end_bb);
/* Ordinary case: allocate, then store the value after the header */
4389 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type parameter that is
 * instantiated with a reference type. Such classes need the slower
 * variance-aware cast paths.
 */
4399 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4402 MonoGenericContainer *container;
4403 MonoGenericInst *ginst;
4405 if (klass->generic_class) {
4406 container = klass->generic_class->container_class->generic_container;
4407 ginst = klass->generic_class->context.class_inst;
4408 } else if (klass->generic_container && context_used) {
4409 container = klass->generic_container;
4410 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types */
4415 for (i = 0; i < container->type_argc; ++i) {
4417 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4419 type = ginst->type_argv [i];
4420 if (mini_type_is_reference (type))
4426 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD may be invoked with a direct call rather
 * than through a wrapper. Only a whitelist of corlib types (plus Math) whose
 * icalls are known not to raise managed exceptions qualifies. The whitelist
 * hash is built lazily; the memory barrier publishes the fully-built table
 * before the global pointer becomes visible to other threads.
 */
4429 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4431 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4432 if (!direct_icalls_enabled (cfg))
4436 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4437 * Whitelist a few icalls for now.
4439 if (!direct_icall_type_hash) {
4440 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4442 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4443 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4444 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4445 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4446 mono_memory_barrier ();
4447 direct_icall_type_hash = h;
4450 if (cmethod->klass == mono_defaults.math_class)
4452 /* No locking needed */
4453 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4458 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS with
 * ARGS (obj, klass, cache), bracketed by cast-details save/reset so a failing
 * cast can be diagnosed under --debug=casts.
 */
4461 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4463 MonoMethod *mono_castclass;
4466 mono_castclass = mono_marshal_get_castclass_with_cache ();
4468 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4469 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4470 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh cache index for a CASTCLASS_CACHE patch, unique per call
 * site: method index in the high 16 bits, per-method counter in the low 16.
 */
4476 get_castclass_cache_idx (MonoCompile *cfg)
4478 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4479 cfg->castclass_cache_index ++;
4480 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: build the (obj, klass-const, cache-const) argument
 * triple with a fresh cache slot and delegate to emit_castclass_with_cache.
 */
4484 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4493 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4496 idx = get_castclass_cache_idx (cfg);
4497 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4499 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4500 return emit_castclass_with_cache (cfg, klass, args);
4504 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL castclass of SRC to KLASS, throwing
 * InvalidCastException on mismatch. Strategy escalates with complexity:
 * cached-wrapper call for variant generics and complex casts, inlined
 * marshal wrapper for interfaces/MBR types in non-shared code, and an
 * open-coded vtable/klass comparison with a null-object fast exit otherwise.
 * INLINE_COSTS is charged for the inlined variants.
 * Returns NULL and sets the cfg exception on error.
 */
4507 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4509 MonoBasicBlock *is_null_bb;
4510 int obj_reg = src->dreg;
4511 int vtable_reg = alloc_preg (cfg);
4513 MonoInst *klass_inst = NULL, *res;
4515 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic argument, non-shared: use the cached-wrapper path */
4517 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4518 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4519 (*inline_costs) += 2;
/* Interface or MarshalByRef target, non-shared: inline the marshal wrapper */
4521 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4522 MonoMethod *mono_castclass;
4523 MonoInst *iargs [1];
4526 mono_castclass = mono_marshal_get_castclass (klass);
4529 save_cast_details (cfg, klass, src->dreg, TRUE);
4530 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4531 iargs, ip, cfg->real_offset, TRUE);
4532 reset_cast_details (cfg);
4533 CHECK_CFG_EXCEPTION;
4534 g_assert (costs > 0);
4536 cfg->real_offset += 5;
4538 (*inline_costs) += costs;
/* Shared code: complex casts go through the cache-assisted wrapper */
4546 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4547 MonoInst *cache_ins;
4549 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4554 /* klass - it's the second element of the cache entry*/
4555 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4558 args [2] = cache_ins;
4560 return emit_castclass_with_cache (cfg, klass, args);
4563 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Open-coded check: null objects always pass castclass */
4566 NEW_BBLOCK (cfg, is_null_bb);
4568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4571 save_cast_details (cfg, klass, obj_reg, FALSE);
4573 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4577 int klass_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class in JIT mode: an exact comparison suffices */
4581 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4582 /* the remoting code is broken, access the class for now */
4583 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4584 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4587 cfg->exception_ptr = klass;
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4595 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4598 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4602 MONO_START_BB (cfg, is_null_bb);
4604 reset_cast_details (cfg);
4613 * Returns NULL and set the cfg exception on error.
4616 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4619 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4620 int obj_reg = src->dreg;
4621 int vtable_reg = alloc_preg (cfg);
4622 int res_reg = alloc_ireg_ref (cfg);
4623 MonoInst *klass_inst = NULL;
4628 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4629 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4630 MonoInst *cache_ins;
4632 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4637 /* klass - it's the second element of the cache entry*/
4638 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins;
4643 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4646 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4649 NEW_BBLOCK (cfg, is_null_bb);
4650 NEW_BBLOCK (cfg, false_bb);
4651 NEW_BBLOCK (cfg, end_bb);
4653 /* Do the assignment at the beginning, so the other assignment can be if converted */
4654 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4655 ins->type = STACK_OBJ;
4658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4664 g_assert (!context_used);
4665 /* the is_null_bb target simply copies the input register to the output */
4666 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4668 int klass_reg = alloc_preg (cfg);
4671 int rank_reg = alloc_preg (cfg);
4672 int eclass_reg = alloc_preg (cfg);
4674 g_assert (!context_used);
4675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4680 if (klass->cast_class == mono_defaults.object_class) {
4681 int parent_reg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4683 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4686 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4688 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4690 } else if (klass->cast_class == mono_defaults.enum_class) {
4691 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4696 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4697 /* Check that the object is a vector too */
4698 int bounds_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4704 /* the is_null_bb target simply copies the input register to the output */
4705 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4707 } else if (mono_class_is_nullable (klass)) {
4708 g_assert (!context_used);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4710 /* the is_null_bb target simply copies the input register to the output */
4711 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4713 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4714 g_assert (!context_used);
4715 /* the remoting code is broken, access the class for now */
4716 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4717 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4719 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4720 cfg->exception_ptr = klass;
4723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4732 /* the is_null_bb target simply copies the input register to the output */
4733 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4738 MONO_START_BB (cfg, false_bb);
4740 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4743 MONO_START_BB (cfg, is_null_bb);
4745 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the IR for the remoting-aware "checked isinst": unlike plain
 *   isinst, the result is an integer code (see the comment below) so the
 *   caller can distinguish the "proxy with undeterminable type" case.
 */
4751 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4753 /* This opcode takes as input an object reference and a class, and returns:
4754 0) if the object is an instance of the class,
4755 1) if the object is not instance of the class,
4756 2) if the object is a proxy whose type cannot be determined */
4759 #ifndef DISABLE_REMOTING
4760 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4762 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4764 int obj_reg = src->dreg;
4765 int dreg = alloc_ireg (cfg);
4767 #ifndef DISABLE_REMOTING
4768 int klass_reg = alloc_preg (cfg);
4771 NEW_BBLOCK (cfg, true_bb);
4772 NEW_BBLOCK (cfg, false_bb);
4773 NEW_BBLOCK (cfg, end_bb);
4774 #ifndef DISABLE_REMOTING
4775 NEW_BBLOCK (cfg, false2_bb);
4776 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL reference: not an instance -> result 1 (false_bb). */
4779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface target: try the direct interface bitmap check first; with
 * remoting enabled, fall through to transparent-proxy inspection when
 * the direct check fails. */
4782 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4783 #ifndef DISABLE_REMOTING
4784 NEW_BBLOCK (cfg, interface_fail_bb);
4787 tmp_reg = alloc_preg (cfg);
4788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4789 #ifndef DISABLE_REMOTING
4790 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4791 MONO_START_BB (cfg, interface_fail_bb);
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass))
4794 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info cannot answer the question -> result 2. */
4796 tmp_reg = alloc_preg (cfg);
4797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4801 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface target (remoting build): check whether the object is a
 * transparent proxy and, if so, test against its remote proxy_class. */
4804 #ifndef DISABLE_REMOTING
4805 tmp_reg = alloc_preg (cfg);
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4809 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4810 tmp_reg = alloc_preg (cfg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4819 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4820 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4822 MONO_START_BB (cfg, no_proxy_bb);
4824 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4826 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0, merged at end_bb. */
4830 MONO_START_BB (cfg, false_bb);
4832 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4835 #ifndef DISABLE_REMOTING
4836 MONO_START_BB (cfg, false2_bb);
4838 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4839 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4842 MONO_START_BB (cfg, true_bb);
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4846 MONO_START_BB (cfg, end_bb);
/* Materialize the integer result as an OP_ICONST-shaped instruction on the
 * evaluation stack (its dreg assignment is presumably in an elided line). */
4849 MONO_INST_NEW (cfg, ins, OP_ICONST);
4851 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR for the remoting-aware "checked castclass".  Like
 *   handle_cisinst it yields an integer code instead of a reference, but a
 *   failed cast throws InvalidCastException instead of producing a code.
 */
4857 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4859 /* This opcode takes as input an object reference and a class, and returns:
4860 0) if the object is an instance of the class,
4861 1) if the object is a proxy whose type cannot be determined
4862 an InvalidCastException exception is thrown otherwise*/
4865 #ifndef DISABLE_REMOTING
4866 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4868 MonoBasicBlock *ok_result_bb;
4870 int obj_reg = src->dreg;
4871 int dreg = alloc_ireg (cfg);
4872 int tmp_reg = alloc_preg (cfg);
4874 #ifndef DISABLE_REMOTING
4875 int klass_reg = alloc_preg (cfg);
4876 NEW_BBLOCK (cfg, end_bb);
4879 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting NULL always succeeds -> result 0. */
4881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4882 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failing cast can produce a detailed message. */
4884 save_cast_details (cfg, klass, obj_reg, FALSE);
4886 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4887 #ifndef DISABLE_REMOTING
4888 NEW_BBLOCK (cfg, interface_fail_bb);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4891 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4892 MONO_START_BB (cfg, interface_fail_bb);
4893 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy and the interface check failed -> throws. */
4895 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot be checked -> throw; with it,
 * report "undeterminable" (result 1). */
4897 tmp_reg = alloc_preg (cfg);
4898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4900 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4902 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: plain interface cast that throws on failure. */
4905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4906 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface target (remoting build): detect transparent proxies and
 * test against the remote proxy_class, mirroring handle_cisinst. */
4910 #ifndef DISABLE_REMOTING
4911 NEW_BBLOCK (cfg, no_proxy_bb);
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4915 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4917 tmp_reg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4921 tmp_reg = alloc_preg (cfg);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4926 NEW_BBLOCK (cfg, fail_1_bb);
4928 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type could not be verified -> result 1. */
4930 MONO_START_BB (cfg, fail_1_bb);
4932 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4935 MONO_START_BB (cfg, no_proxy_bb);
4937 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4939 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> result 0. */
4943 MONO_START_BB (cfg, ok_result_bb);
4945 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4947 #ifndef DISABLE_REMOTING
4948 MONO_START_BB (cfg, end_bb);
/* Materialize the integer result as an OP_ICONST-shaped instruction on the
 * evaluation stack (its dreg assignment is presumably in an elided line). */
4952 MONO_INST_NEW (cfg, ins, OP_ICONST);
4954 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag: compute (value & flag) == flag without
 *   boxing.  ENUM_THIS is the address of the enum value, ENUM_FLAG the flag
 *   operand; the result is a STACK_I4 boolean produced by a ceq.
 */
4959 static G_GNUC_UNUSED MonoInst*
4960 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4962 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4963 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* The switch presumably decides between 32-bit and 64-bit handling based on
 * the enum's underlying type (is_i4 is set in elided lines — TODO confirm);
 * SIZEOF_REGISTER gates which widths can use the I4 path. */
4966 switch (enum_type->type) {
4969 #if SIZEOF_REGISTER == 8
4981 MonoInst *load, *and_, *cmp, *ceq;
4982 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4984 int dest_reg = alloc_ireg (cfg);
/* load value; and = value & flag; result = (and == flag). */
4986 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4987 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4988 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4989 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4991 ceq->type = STACK_I4;
/* Decompose immediately so backends lacking these opcodes still work. */
4994 load = mono_decompose_opcode (cfg, load);
4995 and_ = mono_decompose_opcode (cfg, and_);
4996 cmp = mono_decompose_opcode (cfg, cmp);
4997 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor: allocate the delegate object,
 *   store the target and method fields, cache a per-domain code slot for the
 *   compiled target, and install the invoke trampoline (or, for llvm-only,
 *   call the init icalls).  VIRTUAL_ selects the virtual-delegate variant.
 */
5005 * Returns NULL and sets the cfg exception on error.
5007 static G_GNUC_UNUSED MonoInst*
5008 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5012 gpointer trampoline;
5013 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need an arch-specific invoke impl; bail out (the
 * failure path is in elided lines) when none is available. */
5017 if (virtual_ && !cfg->llvm_only) {
5018 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5021 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5025 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5029 /* Inline the contents of mono_delegate_ctor */
5031 /* Set target field */
5032 /* Optimize away setting of NULL target */
5033 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a heap reference, so notify the GC. */
5035 if (cfg->gen_write_barriers) {
5036 dreg = alloc_preg (cfg);
5037 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5038 emit_write_barrier (cfg, ptr, target);
5042 /* Set method field */
5043 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5047 * To avoid looking up the compiled code belonging to the target method
5048 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5049 * store it, and we fill it after the method has been compiled.
5051 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5052 MonoInst *code_slot_ins;
5055 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared path: look up / lazily create the slot under the domain lock. */
5057 domain = mono_domain_get ();
5058 mono_domain_lock (domain);
5059 if (!domain_jit_info (domain)->method_code_hash)
5060 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5061 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5063 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5064 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5066 mono_domain_unlock (domain);
5068 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines; initialize the delegate via icalls instead. */
5073 if (cfg->llvm_only) {
5074 MonoInst *args [16];
5079 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5080 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5083 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: reference the trampoline indirectly through patch info so the image
 * stays position independent; JIT: bake the trampoline pointer in. */
5089 if (cfg->compile_aot) {
5090 MonoDelegateClassMethodPair *del_tramp;
5092 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5093 del_tramp->klass = klass;
5094 del_tramp->method = context_used ? NULL : method;
5095 del_tramp->is_virtual = virtual_;
5096 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5099 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5101 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5102 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5105 /* Set invoke_impl field */
5107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual JIT path: tramp_ins points at a MonoDelegateTrampInfo;
 * copy its invoke_impl and method_ptr into the delegate. */
5109 dreg = alloc_preg (cfg);
5110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5113 dreg = alloc_preg (cfg);
5114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5118 dreg = alloc_preg (cfg);
5119 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5120 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5122 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newarr'/multi-dim array creation through the
 *   vararg mono_array_new_va () icall for the given RANK.  SP holds the
 *   length (and bound) arguments; IP is presumably used for error reporting
 *   in elided lines.  Marks the method as vararg, which disables LLVM.
 */
5128 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5130 MonoJitICallInfo *info;
5132 /* Need to register the icall so it gets an icall wrapper */
5133 info = mono_get_array_new_va_icall (rank);
5135 cfg->flags |= MONO_CFG_HAS_VARARGS;
5137 /* mono_array_new_va () needs a vararg calling convention */
5138 cfg->exception_message = g_strdup ("array-new");
5139 cfg->disable_llvm = TRUE;
5141 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5142 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5146 * handle_constrained_gsharedvt_call:
5148 * Handle constrained calls where the receiver is a gsharedvt type.
5149 * Return the instruction representing the call. Set the cfg exception on failure.
5152 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5153 gboolean *ref_emit_widen)
5155 MonoInst *ins = NULL;
5156 gboolean emit_widen = *ref_emit_widen;
5159 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5160 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5161 * pack the arguments into an array, and do the rest of the work in an icall.
/* Eligibility filter: only simple shapes (few params, primitive/ref/struct
 * returns) are routed through the generic icall; everything else falls
 * through to the GSHAREDVT_FAILURE path below. */
5163 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5164 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5165 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5166 MonoInst *args [16];
5169 * This case handles calls to
5170 * - object:ToString()/Equals()/GetHashCode(),
5171 * - System.IComparable<T>:CompareTo()
5172 * - System.IEquatable<T>:Equals ()
5173 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the method, resolved through the RGCTX when it is generic. */
5177 if (mono_method_check_context_used (cmethod))
5178 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5180 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5181 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5183 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5184 if (fsig->hasthis && fsig->param_count) {
5185 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5186 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5187 ins->dreg = alloc_preg (cfg);
5188 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5189 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus a flag telling the icall
 * whether to dereference (ref instantiation) or not (vtype). */
5192 if (mini_is_gsharedvt_type (fsig->params [0])) {
5193 int addr_reg, deref_arg_reg;
5195 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5196 deref_arg_reg = alloc_preg (cfg);
5197 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5198 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5200 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5201 addr_reg = ins->dreg;
5202 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5204 EMIT_NEW_ICONST (cfg, args [3], 0);
5205 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5208 EMIT_NEW_ICONST (cfg, args [3], 0);
5209 EMIT_NEW_ICONST (cfg, args [4], 0);
5211 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox according to the return type. */
5214 if (mini_is_gsharedvt_type (fsig->ret)) {
5215 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5216 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed payload, then load it. */
5220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5221 MONO_ADD_INS (cfg->cbb, add);
5223 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5224 MONO_ADD_INS (cfg->cbb, ins);
5225 /* ins represents the call result */
5228 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5231 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 *   entry basic block, exactly once per method (guarded by
 *   got_var_allocated), and keep the variable alive with a dummy use in the
 *   exit block.
 */
5240 mono_emit_load_got_addr (MonoCompile *cfg)
5242 MonoInst *getaddr, *dummy_use;
/* Nothing to do if the method needs no GOT or it was already loaded. */
5244 if (!cfg->got_var || cfg->got_var_allocated)
5247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5248 getaddr->cil_code = cfg->header->code;
5249 getaddr->dreg = cfg->got_var->dreg;
5251 /* Add it to the start of the first bblock */
5252 if (cfg->bb_entry->code) {
5253 getaddr->next = cfg->bb_entry->code;
5254 cfg->bb_entry->code = getaddr;
5257 MONO_ADD_INS (cfg->bb_entry, getaddr);
5259 cfg->got_var_allocated = TRUE;
5262 * Add a dummy use to keep the got_var alive, since real uses might
5263 * only be generated by the back ends.
5264 * Add it to end_bblock, so the variable's lifetime covers the whole
5266 * It would be better to make the usage of the got var explicit in all
5267 * cases when the backend needs it (i.e. calls, throw etc.), so this
5268 * wouldn't be needed.
5270 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5271 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size inlining threshold; initialized lazily from the
 * MONO_INLINELIMIT environment variable (falling back to
 * INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
5274 static int inline_limit;
5275 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 *   CFG.  Checks, in order: explicit disables and depth limit, header
 *   summary availability, NoInlining/Synchronized/MarshalByRef flags, the
 *   IL size limit (env-overridable, waived for AggressiveInlining), and
 *   whether the declaring class's cctor can be run or safely skipped.
 */
5278 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5280 MonoMethodHeaderSummary header;
5282 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5283 MonoMethodSignature *sig = mono_method_signature (method);
5287 if (cfg->disable_inline)
/* Cap recursive inlining depth. */
5292 if (cfg->inline_depth > 10)
5295 if (!mono_method_get_header_summary (method, &header))
5298 /*runtime, icall and pinvoke are checked by summary call*/
5299 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5300 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5301 (mono_class_is_marshalbyref (method->klass)) ||
5305 /* also consider num_locals? */
5306 /* Do the size check early to avoid creating vtables */
5307 if (!inline_limit_inited) {
5308 if (g_getenv ("MONO_INLINELIMIT"))
5309 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5311 inline_limit = INLINE_LENGTH_LIMIT;
5312 inline_limit_inited = TRUE;
5314 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5318 * if we can initialize the class of the method right away, we do,
5319 * otherwise we don't allow inlining if the class needs initialization,
5320 * since it would mean inserting a call to mono_runtime_class_init()
5321 * inside the inlined code
5323 if (!(cfg->opt & MONO_OPT_SHARED)) {
5324 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5325 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5326 vtable = mono_class_vtable (cfg->domain, method->klass);
5329 if (!cfg->compile_aot) {
5331 if (!mono_runtime_class_init_full (vtable, &error))
5332 mono_error_raise_exception (&error); /* FIXME don't raise here */
5334 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5335 if (cfg->run_cctors && method->klass->has_cctor) {
5336 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5337 if (!method->klass->runtime_info)
5338 /* No vtable created yet */
5340 vtable = mono_class_vtable (cfg->domain, method->klass);
5343 /* This makes so that inline cannot trigger */
5344 /* .cctors: too many apps depend on them */
5345 /* running with a specific order... */
5346 if (! vtable->initialized)
5349 if (!mono_runtime_class_init_full (vtable, &error))
5350 mono_error_raise_exception (&error); /* FIXME don't raise here */
5352 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5353 if (!method->klass->runtime_info)
5354 /* No vtable created yet */
5356 vtable = mono_class_vtable (cfg->domain, method->klass);
5359 if (!vtable->initialized)
5364 * If we're compiling for shared code
5365 * the cctor will need to be run at aot method load time, for example,
5366 * or at the end of the compilation of the inlining method.
5368 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4, since
 * those need the soft-float decomposition pass applied per-method. */
5372 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5373 if (mono_arch_is_soft_float ()) {
5375 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5377 for (i = 0; i < sig->param_count; ++i)
5378 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Per-compilation opt-out list (e.g. to break inline cycles). */
5383 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a
 *   class-init check for KLASS.  JIT mode can skip it when the vtable is
 *   already initialized; BeforeFieldInit classes and accesses from within
 *   the class's own instance methods are also exempt.
 */
5390 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5392 if (!cfg->compile_aot) {
5394 if (vtable->initialized)
5398 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5399 if (cfg->method == method)
5403 if (!mono_class_needs_cctor_run (klass, method))
5406 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5407 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional
 *   array of element type KLASS, with an optional bounds check (BCHECK).
 *   Uses an LEA on x86/amd64 for power-of-two element sizes, and an RGCTX
 *   lookup for the element size when KLASS is a variable gsharedvt type.
 */
5414 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5418 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5421 if (mini_is_gsharedvt_variable_klass (klass)) {
5424 mono_class_init (klass);
5425 size = mono_class_array_element_size (klass);
5428 mult_reg = alloc_preg (cfg);
5429 array_reg = arr->dreg;
5430 index_reg = index->dreg;
5432 #if SIZEOF_REGISTER == 8
5433 /* The array reg is 64 bits but the index reg is only 32 */
5434 if (COMPILE_LLVM (cfg)) {
5436 index2_reg = index_reg;
5438 index2_reg = alloc_preg (cfg);
5439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: a 64-bit index from the IL stack is truncated to I4. */
5442 if (index->type == STACK_I8) {
5443 index2_reg = alloc_preg (cfg);
5444 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5446 index2_reg = index_reg;
/* Unsigned compare against max_length throws IndexOutOfRangeException. */
5451 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5453 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and the vector offset into one LEA. */
5454 if (size == 1 || size == 2 || size == 4 || size == 8) {
5455 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5457 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5458 ins->klass = mono_class_get_element_class (klass);
5459 ins->type = STACK_MP;
5465 add_reg = alloc_ireg_mp (cfg);
/* Variable-size element (gsharedvt): fetch the element size at run time
 * from the RGCTX instead of using the compile-time constant. */
5468 MonoInst *rgctx_ins;
5471 g_assert (cfg->gshared);
5472 context_used = mini_class_check_context_used (cfg, klass);
5473 g_assert (context_used);
5474 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5475 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
5479 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5480 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5481 ins->klass = mono_class_get_element_class (klass);
5482 ins->type = STACK_MP;
5483 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of ARR [i, j] for a two-dimensional array
 *   of element type KLASS.  Each dimension's index is adjusted by its lower
 *   bound and range-checked against its length (both read from the array's
 *   MonoArrayBounds), then the flattened offset is
 *   ((i' * len2) + j') * size + offsetof (MonoArray, vector).
 */
5489 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5491 int bounds_reg = alloc_preg (cfg);
5492 int add_reg = alloc_ireg_mp (cfg);
5493 int mult_reg = alloc_preg (cfg);
5494 int mult2_reg = alloc_preg (cfg);
5495 int low1_reg = alloc_preg (cfg);
5496 int low2_reg = alloc_preg (cfg);
5497 int high1_reg = alloc_preg (cfg);
5498 int high2_reg = alloc_preg (cfg);
5499 int realidx1_reg = alloc_preg (cfg);
5500 int realidx2_reg = alloc_preg (cfg);
5501 int sum_reg = alloc_preg (cfg);
5502 int index1, index2, tmpreg;
5506 mono_class_init (klass);
5507 size = mono_class_array_element_size (klass);
5509 index1 = index_ins1->dreg;
5510 index2 = index_ins2->dreg;
5512 #if SIZEOF_REGISTER == 8
5513 /* The array reg is 64 bits but the index reg is only 32 */
5514 if (COMPILE_LLVM (cfg)) {
5517 tmpreg = alloc_preg (cfg);
5518 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5520 tmpreg = alloc_preg (cfg);
5521 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5525 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5529 /* range checking */
5530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5531 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1, unsigned-checked
 * against length1 (catches both underflow and overflow in one compare). */
5533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5534 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5535 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5537 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5539 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same, reading the second MonoArrayBounds entry. */
5541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5542 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5543 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5545 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5547 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + vector offset */
5549 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5550 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5552 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5553 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5555 ins->type = STACK_MP;
5557 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for an array accessor
 * method CMETHOD (Get/Set/Address).  SP holds the array reference followed by
 * the index values; when IS_SET is true the final signature parameter is the
 * value being stored, so it is excluded from the rank computation.
 */
5563 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5567 MonoMethod *addr_method;
5569 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the last parameter is the value, not an index. */
5571 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank 1: single-index fast path, with a bounds check. */
5574 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5576 /* emit_ldelema_2 depends on OP_LMUL */
/* Rank 2 fast path; not usable for gsharedvt element types, whose size is
 * not a JIT-time constant. */
5577 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5578 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* NOTE(review): gsharedvt element types branch here — confirm the handling
 * on the following (not shown) line against the full source. */
5581 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the marshalling-generated Address helper for this
 * rank/element size. */
5584 element_size = mono_class_array_element_size (eclass);
5585 addr_method = mono_marshal_get_array_address (rank, element_size);
5586 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default MonoBreakPolicyFunc: honor every breakpoint regardless of METHOD.
 */
5591 static MonoBreakPolicy
5592 always_insert_breakpoint (MonoMethod *method)
5594 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; embedders can replace it via
 * mono_set_break_policy (). */
5597 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5600 * mono_set_break_policy:
5601 * policy_callback: the new callback function
5603 * Allow embedders to decide whether to actually obey breakpoint instructions
5604 * (both break IL instructions and Debugger.Break () method calls), for example
5605 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5606 * untrusted or semi-trusted code.
5608 * @policy_callback will be called every time a break point instruction needs to
5609 * be inserted with the method argument being the method that calls Debugger.Break()
5610 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5611 * if it wants the breakpoint to not be effective in the given method.
5612 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy:
 * @policy_callback: the new policy callback, or NULL to restore the default
 *
 *   Install the function consulted whenever a breakpoint (IL break
 * instruction or Debugger.Break ()) is about to be emitted; see the comment
 * block above for the full contract.
 */
5615 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5617 if (policy_callback)
5618 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy. */
5620 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — the misspelled name is the established
 * internal API and is kept unchanged)
 *
 *   Consult the registered break policy callback to decide whether a
 * breakpoint requested in METHOD should actually be emitted.
 */
5624 should_insert_brekpoint (MonoMethod *method) {
5625 switch (break_policy_func (method)) {
5626 case MONO_BREAK_POLICY_ALWAYS:
5628 case MONO_BREAK_POLICY_NEVER:
/* mdb-only breakpoints are no longer supported. */
5630 case MONO_BREAK_POLICY_ON_DBG:
5631 g_warning ("mdb no longer supported");
/* Embedder-supplied callback returned a value outside the enum. */
5634 g_warning ("Incorrect value returned from break policy callback");
5639 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   ARGS is (array, index, value-location).  Computes the element address,
 * then copies between the element and *args [2]: when IS_SET the value is
 * copied into the array (with a GC write barrier for reference elements),
 * otherwise the element is copied out to the caller's location.
 */
5641 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5643 MonoInst *addr, *store, *load;
5644 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5646 /* the bounds check is already done by the callers */
5647 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load the value from the caller's location, store into the element. */
5649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5650 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Reference-typed element stores must notify the GC. */
5651 if (mini_type_is_reference (fsig->params [2]))
5652 emit_write_barrier (cfg, addr, load);
/* Get: load the element, store it into the caller's location. */
5654 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5655 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is represented as a reference type by the JIT. */
5662 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5664 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for an array element store; SP is (array, index, value).
 * Reference-typed stores that need the covariance check go through the
 * virtual stelemref helper; otherwise the element address is computed and
 * the value stored directly, with a GC write barrier when the element is a
 * reference type.  SAFETY_CHECKS controls bounds/covariance checking.
 */
5668 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* A (possibly) non-null object store needs the array covariance check, so
 * route it through the stelemref helper; storing null never needs it. */
5670 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5671 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5672 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5673 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5674 MonoInst *iargs [3];
/* The helper is called virtually, so its vtable slot must exist. */
5677 mono_class_setup_vtable (obj_array);
5678 g_assert (helper->slot);
5680 if (sp [0]->type != STACK_OBJ)
5682 if (sp [2]->type != STACK_OBJ)
5689 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at JIT time, use a variable-size store. */
5693 if (mini_is_gsharedvt_variable_klass (klass)) {
5696 // FIXME-VT: OP_ICONST optimization
5697 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5698 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5699 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
5700 } else if (sp [1]->opcode == OP_ICONST) {
5701 int array_reg = sp [0]->dreg;
5702 int index_reg = sp [1]->dreg;
5703 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64 bit: the index register is 32 bits wide, widen it first. */
5705 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5706 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5709 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5710 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store through it. */
5712 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
/* Reference element stores must notify the GC. */
5714 if (generic_class_is_reference_type (cfg, klass))
5715 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the internal Array.UnsafeStore/UnsafeLoad helpers: element access
 * with safety checks disabled.  IS_SET selects store vs load.
 */
5722 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Store: the element type is the third parameter's type. */
5727 eklass = mono_class_from_mono_type (fsig->params [2]);
/* Load: the element type is the return type. */
5729 eklass = mono_class_from_mono_type (fsig->ret);
/* FALSE => no bounds/covariance checks. */
5732 return emit_array_store (cfg, eklass, args, FALSE);
5734 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5735 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5741 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5744 int param_size, return_size;
5746 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5747 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5749 if (cfg->verbose_level > 3)
5750 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5752 //Don't allow mixing reference types with value types
5753 if (param_klass->valuetype != return_klass->valuetype) {
5754 if (cfg->verbose_level > 3)
5755 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5759 if (!param_klass->valuetype) {
5760 if (cfg->verbose_level > 3)
5761 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5766 if (param_klass->has_references || return_klass->has_references)
5769 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5770 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5771 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5772 if (cfg->verbose_level > 3)
5773 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5777 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5778 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5779 if (cfg->verbose_level > 3)
5780 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5784 param_size = mono_class_value_size (param_klass, &align);
5785 return_size = mono_class_value_size (return_klass, &align);
5787 //We can do it if sizes match
5788 if (param_size == return_size) {
5789 if (cfg->verbose_level > 3)
5790 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5794 //No simple way to handle struct if sizes don't match
5795 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5796 if (cfg->verbose_level > 3)
5797 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5802 * Same reg size category.
5803 * A quick note on why we don't require widening here.
5804 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5806 * Since the source value comes from a function argument, the JIT will already have
5807 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5809 if (param_size <= 4 && return_size <= 4) {
5810 if (cfg->verbose_level > 3)
5811 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov<S,R> when the two types are representation
 * compatible (see is_unsafe_mov_compatible), including one-dimensional
 * arrays of compatible element types.
 */
5819 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5821 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5822 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* The size of gsharedvt types is not known at JIT time. */
5824 if (mini_is_gsharedvt_variable_type (fsig->ret))
5827 //Valuetypes that are semantically equivalent or numbers than can be widened to
5828 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5831 //Arrays of valuetypes that are semantically equivalent
5832 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with inline IR.  When SIMD intrinsics
 * are built in and enabled, the SIMD backend gets first chance; otherwise
 * fall through to the native-types intrinsic expansion.
 */
5839 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5841 #ifdef MONO_ARCH_SIMD_INTRINSICS
5842 MonoInst *ins = NULL;
/* Give the SIMD backend first chance at the constructor. */
5844 if (cfg->opt & MONO_OPT_SIMD) {
5845 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to the native-types intrinsic expansion. */
5851 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given MONO_MEMORY_BARRIER_* KIND to
 * the current basic block.
 */
5855 emit_memory_barrier (MonoCompile *cfg, int kind)
5857 MonoInst *ins = NULL;
5858 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5859 MONO_ADD_INS (cfg->cbb, ins);
5860 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics only the LLVM backend can lower: System.Math Sin/Cos/Sqrt and
 * Abs(double) become unary R8 opcodes; with MONO_OPT_CMOV enabled, integer
 * Math.Min/Max map to the IMIN/IMAX/LMIN/LMAX family.
 */
5866 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5868 MonoInst *ins = NULL;
5871 /* The LLVM backend supports these intrinsics */
5872 if (cmethod->klass == mono_defaults.math_class) {
5873 if (strcmp (cmethod->name, "Sin") == 0) {
5875 } else if (strcmp (cmethod->name, "Cos") == 0) {
5877 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Only the double overload of Abs is handled here. */
5879 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary math intrinsic: one argument, R8 result. */
5883 if (opcode && fsig->param_count == 1) {
5884 MONO_INST_NEW (cfg, ins, opcode);
5885 ins->type = STACK_R8;
5886 ins->dreg = mono_alloc_freg (cfg);
5887 ins->sreg1 = args [0]->dreg;
5888 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to conditional moves, hence the MONO_OPT_CMOV gate. */
5892 if (cfg->opt & MONO_OPT_CMOV) {
5893 if (strcmp (cmethod->name, "Min") == 0) {
5894 if (fsig->params [0]->type == MONO_TYPE_I4)
5896 if (fsig->params [0]->type == MONO_TYPE_U4)
5897 opcode = OP_IMIN_UN;
5898 else if (fsig->params [0]->type == MONO_TYPE_I8)
5900 else if (fsig->params [0]->type == MONO_TYPE_U8)
5901 opcode = OP_LMIN_UN;
5902 } else if (strcmp (cmethod->name, "Max") == 0) {
5903 if (fsig->params [0]->type == MONO_TYPE_I4)
5905 if (fsig->params [0]->type == MONO_TYPE_U4)
5906 opcode = OP_IMAX_UN;
5907 else if (fsig->params [0]->type == MONO_TYPE_I8)
5909 else if (fsig->params [0]->type == MONO_TYPE_U8)
5910 opcode = OP_LMAX_UN;
/* Binary min/max: the result stack type follows the operand width. */
5914 if (opcode && fsig->param_count == 2) {
5915 MONO_INST_NEW (cfg, ins, opcode);
5916 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5917 ins->dreg = mono_alloc_ireg (cfg);
5918 ins->sreg1 = args [0]->dreg;
5919 ins->sreg2 = args [1]->dreg;
5920 MONO_ADD_INS (cfg->cbb, ins);
5930 if (cmethod->klass == mono_defaults.array_class) {
5931 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5932 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5933 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5934 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5935 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5936 return emit_array_unsafe_mov (cfg, fsig, args);
5943 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5945 MonoInst *ins = NULL;
5947 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5949 if (cmethod->klass == mono_defaults.string_class) {
5950 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5951 int dreg = alloc_ireg (cfg);
5952 int index_reg = alloc_preg (cfg);
5953 int add_reg = alloc_preg (cfg);
5955 #if SIZEOF_REGISTER == 8
5956 if (COMPILE_LLVM (cfg)) {
5957 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5959 /* The array reg is 64 bits but the index reg is only 32 */
5960 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5963 index_reg = args [1]->dreg;
5965 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5967 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5968 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5969 add_reg = ins->dreg;
5970 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5973 int mult_reg = alloc_preg (cfg);
5974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5975 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5976 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5977 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5979 type_from_op (cfg, ins, NULL, NULL);
5981 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5982 int dreg = alloc_ireg (cfg);
5983 /* Decompose later to allow more optimizations */
5984 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5985 ins->type = STACK_I4;
5986 ins->flags |= MONO_INST_FAULT;
5987 cfg->cbb->has_array_access = TRUE;
5988 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5993 } else if (cmethod->klass == mono_defaults.object_class) {
5994 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5995 int dreg = alloc_ireg_ref (cfg);
5996 int vt_reg = alloc_preg (cfg);
5997 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5998 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5999 type_from_op (cfg, ins, NULL, NULL);
6002 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6003 int dreg = alloc_ireg (cfg);
6004 int t1 = alloc_ireg (cfg);
6006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6007 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6008 ins->type = STACK_I4;
6011 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6012 MONO_INST_NEW (cfg, ins, OP_NOP);
6013 MONO_ADD_INS (cfg->cbb, ins);
6017 } else if (cmethod->klass == mono_defaults.array_class) {
6018 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6019 return emit_array_generic_access (cfg, fsig, args, FALSE);
6020 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6021 return emit_array_generic_access (cfg, fsig, args, TRUE);
6023 #ifndef MONO_BIG_ARRAYS
6025 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6028 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6029 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6030 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6031 int dreg = alloc_ireg (cfg);
6032 int bounds_reg = alloc_ireg_mp (cfg);
6033 MonoBasicBlock *end_bb, *szarray_bb;
6034 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6036 NEW_BBLOCK (cfg, end_bb);
6037 NEW_BBLOCK (cfg, szarray_bb);
6039 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6040 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6043 /* Non-szarray case */
6045 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6046 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6051 MONO_START_BB (cfg, szarray_bb);
6054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6055 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6057 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6058 MONO_START_BB (cfg, end_bb);
6060 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6061 ins->type = STACK_I4;
6067 if (cmethod->name [0] != 'g')
6070 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6071 int dreg = alloc_ireg (cfg);
6072 int vtable_reg = alloc_preg (cfg);
6073 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6074 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6076 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6077 type_from_op (cfg, ins, NULL, NULL);
6080 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6081 int dreg = alloc_ireg (cfg);
6083 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6084 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6085 type_from_op (cfg, ins, NULL, NULL);
6090 } else if (cmethod->klass == runtime_helpers_class) {
6091 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6092 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6096 } else if (cmethod->klass == mono_defaults.monitor_class) {
6097 gboolean is_enter = FALSE;
6098 gboolean is_v4 = FALSE;
6100 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6104 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6109 * To make async stack traces work, icalls which can block should have a wrapper.
6110 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6112 MonoBasicBlock *end_bb;
6114 NEW_BBLOCK (cfg, end_bb);
6116 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6119 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6120 MONO_START_BB (cfg, end_bb);
6123 } else if (cmethod->klass == mono_defaults.thread_class) {
6124 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6125 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6126 MONO_ADD_INS (cfg->cbb, ins);
6128 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6129 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6130 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6132 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6134 if (fsig->params [0]->type == MONO_TYPE_I1)
6135 opcode = OP_LOADI1_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_U1)
6137 opcode = OP_LOADU1_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_I2)
6139 opcode = OP_LOADI2_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_U2)
6141 opcode = OP_LOADU2_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_I4)
6143 opcode = OP_LOADI4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_U4)
6145 opcode = OP_LOADU4_MEMBASE;
6146 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6147 opcode = OP_LOADI8_MEMBASE;
6148 else if (fsig->params [0]->type == MONO_TYPE_R4)
6149 opcode = OP_LOADR4_MEMBASE;
6150 else if (fsig->params [0]->type == MONO_TYPE_R8)
6151 opcode = OP_LOADR8_MEMBASE;
6152 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6153 opcode = OP_LOAD_MEMBASE;
6156 MONO_INST_NEW (cfg, ins, opcode);
6157 ins->inst_basereg = args [0]->dreg;
6158 ins->inst_offset = 0;
6159 MONO_ADD_INS (cfg->cbb, ins);
6161 switch (fsig->params [0]->type) {
6168 ins->dreg = mono_alloc_ireg (cfg);
6169 ins->type = STACK_I4;
6173 ins->dreg = mono_alloc_lreg (cfg);
6174 ins->type = STACK_I8;
6178 ins->dreg = mono_alloc_ireg (cfg);
6179 #if SIZEOF_REGISTER == 8
6180 ins->type = STACK_I8;
6182 ins->type = STACK_I4;
6187 ins->dreg = mono_alloc_freg (cfg);
6188 ins->type = STACK_R8;
6191 g_assert (mini_type_is_reference (fsig->params [0]));
6192 ins->dreg = mono_alloc_ireg_ref (cfg);
6193 ins->type = STACK_OBJ;
6197 if (opcode == OP_LOADI8_MEMBASE)
6198 ins = mono_decompose_opcode (cfg, ins);
6200 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6204 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6206 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6208 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6209 opcode = OP_STOREI1_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6211 opcode = OP_STOREI2_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6213 opcode = OP_STOREI4_MEMBASE_REG;
6214 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6215 opcode = OP_STOREI8_MEMBASE_REG;
6216 else if (fsig->params [0]->type == MONO_TYPE_R4)
6217 opcode = OP_STORER4_MEMBASE_REG;
6218 else if (fsig->params [0]->type == MONO_TYPE_R8)
6219 opcode = OP_STORER8_MEMBASE_REG;
6220 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6221 opcode = OP_STORE_MEMBASE_REG;
6224 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6226 MONO_INST_NEW (cfg, ins, opcode);
6227 ins->sreg1 = args [1]->dreg;
6228 ins->inst_destbasereg = args [0]->dreg;
6229 ins->inst_offset = 0;
6230 MONO_ADD_INS (cfg->cbb, ins);
6232 if (opcode == OP_STOREI8_MEMBASE_REG)
6233 ins = mono_decompose_opcode (cfg, ins);
6238 } else if (cmethod->klass->image == mono_defaults.corlib &&
6239 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6240 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6243 #if SIZEOF_REGISTER == 8
6244 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6245 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6246 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6247 ins->dreg = mono_alloc_preg (cfg);
6248 ins->sreg1 = args [0]->dreg;
6249 ins->type = STACK_I8;
6250 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6251 MONO_ADD_INS (cfg->cbb, ins);
6255 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6257 /* 64 bit reads are already atomic */
6258 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6259 load_ins->dreg = mono_alloc_preg (cfg);
6260 load_ins->inst_basereg = args [0]->dreg;
6261 load_ins->inst_offset = 0;
6262 load_ins->type = STACK_I8;
6263 MONO_ADD_INS (cfg->cbb, load_ins);
6265 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6272 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6273 MonoInst *ins_iconst;
6276 if (fsig->params [0]->type == MONO_TYPE_I4) {
6277 opcode = OP_ATOMIC_ADD_I4;
6278 cfg->has_atomic_add_i4 = TRUE;
6280 #if SIZEOF_REGISTER == 8
6281 else if (fsig->params [0]->type == MONO_TYPE_I8)
6282 opcode = OP_ATOMIC_ADD_I8;
6285 if (!mono_arch_opcode_supported (opcode))
6287 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6288 ins_iconst->inst_c0 = 1;
6289 ins_iconst->dreg = mono_alloc_ireg (cfg);
6290 MONO_ADD_INS (cfg->cbb, ins_iconst);
6292 MONO_INST_NEW (cfg, ins, opcode);
6293 ins->dreg = mono_alloc_ireg (cfg);
6294 ins->inst_basereg = args [0]->dreg;
6295 ins->inst_offset = 0;
6296 ins->sreg2 = ins_iconst->dreg;
6297 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6298 MONO_ADD_INS (cfg->cbb, ins);
6300 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6301 MonoInst *ins_iconst;
6304 if (fsig->params [0]->type == MONO_TYPE_I4) {
6305 opcode = OP_ATOMIC_ADD_I4;
6306 cfg->has_atomic_add_i4 = TRUE;
6308 #if SIZEOF_REGISTER == 8
6309 else if (fsig->params [0]->type == MONO_TYPE_I8)
6310 opcode = OP_ATOMIC_ADD_I8;
6313 if (!mono_arch_opcode_supported (opcode))
6315 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6316 ins_iconst->inst_c0 = -1;
6317 ins_iconst->dreg = mono_alloc_ireg (cfg);
6318 MONO_ADD_INS (cfg->cbb, ins_iconst);
6320 MONO_INST_NEW (cfg, ins, opcode);
6321 ins->dreg = mono_alloc_ireg (cfg);
6322 ins->inst_basereg = args [0]->dreg;
6323 ins->inst_offset = 0;
6324 ins->sreg2 = ins_iconst->dreg;
6325 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6326 MONO_ADD_INS (cfg->cbb, ins);
6328 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6331 if (fsig->params [0]->type == MONO_TYPE_I4) {
6332 opcode = OP_ATOMIC_ADD_I4;
6333 cfg->has_atomic_add_i4 = TRUE;
6335 #if SIZEOF_REGISTER == 8
6336 else if (fsig->params [0]->type == MONO_TYPE_I8)
6337 opcode = OP_ATOMIC_ADD_I8;
6340 if (!mono_arch_opcode_supported (opcode))
6342 MONO_INST_NEW (cfg, ins, opcode);
6343 ins->dreg = mono_alloc_ireg (cfg);
6344 ins->inst_basereg = args [0]->dreg;
6345 ins->inst_offset = 0;
6346 ins->sreg2 = args [1]->dreg;
6347 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6348 MONO_ADD_INS (cfg->cbb, ins);
6351 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6352 MonoInst *f2i = NULL, *i2f;
6353 guint32 opcode, f2i_opcode, i2f_opcode;
6354 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6355 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6357 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6358 fsig->params [0]->type == MONO_TYPE_R4) {
6359 opcode = OP_ATOMIC_EXCHANGE_I4;
6360 f2i_opcode = OP_MOVE_F_TO_I4;
6361 i2f_opcode = OP_MOVE_I4_TO_F;
6362 cfg->has_atomic_exchange_i4 = TRUE;
6364 #if SIZEOF_REGISTER == 8
6366 fsig->params [0]->type == MONO_TYPE_I8 ||
6367 fsig->params [0]->type == MONO_TYPE_R8 ||
6368 fsig->params [0]->type == MONO_TYPE_I) {
6369 opcode = OP_ATOMIC_EXCHANGE_I8;
6370 f2i_opcode = OP_MOVE_F_TO_I8;
6371 i2f_opcode = OP_MOVE_I8_TO_F;
6374 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6375 opcode = OP_ATOMIC_EXCHANGE_I4;
6376 cfg->has_atomic_exchange_i4 = TRUE;
6382 if (!mono_arch_opcode_supported (opcode))
6386 /* TODO: Decompose these opcodes instead of bailing here. */
6387 if (COMPILE_SOFT_FLOAT (cfg))
6390 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6391 f2i->dreg = mono_alloc_ireg (cfg);
6392 f2i->sreg1 = args [1]->dreg;
6393 if (f2i_opcode == OP_MOVE_F_TO_I4)
6394 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6395 MONO_ADD_INS (cfg->cbb, f2i);
6398 MONO_INST_NEW (cfg, ins, opcode);
6399 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6400 ins->inst_basereg = args [0]->dreg;
6401 ins->inst_offset = 0;
6402 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6403 MONO_ADD_INS (cfg->cbb, ins);
6405 switch (fsig->params [0]->type) {
6407 ins->type = STACK_I4;
6410 ins->type = STACK_I8;
6413 #if SIZEOF_REGISTER == 8
6414 ins->type = STACK_I8;
6416 ins->type = STACK_I4;
6421 ins->type = STACK_R8;
6424 g_assert (mini_type_is_reference (fsig->params [0]));
6425 ins->type = STACK_OBJ;
6430 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6431 i2f->dreg = mono_alloc_freg (cfg);
6432 i2f->sreg1 = ins->dreg;
6433 i2f->type = STACK_R8;
6434 if (i2f_opcode == OP_MOVE_I4_TO_F)
6435 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6436 MONO_ADD_INS (cfg->cbb, i2f);
6441 if (cfg->gen_write_barriers && is_ref)
6442 emit_write_barrier (cfg, args [0], args [1]);
6444 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6445 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6446 guint32 opcode, f2i_opcode, i2f_opcode;
6447 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6448 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6450 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6451 fsig->params [1]->type == MONO_TYPE_R4) {
6452 opcode = OP_ATOMIC_CAS_I4;
6453 f2i_opcode = OP_MOVE_F_TO_I4;
6454 i2f_opcode = OP_MOVE_I4_TO_F;
6455 cfg->has_atomic_cas_i4 = TRUE;
6457 #if SIZEOF_REGISTER == 8
6459 fsig->params [1]->type == MONO_TYPE_I8 ||
6460 fsig->params [1]->type == MONO_TYPE_R8 ||
6461 fsig->params [1]->type == MONO_TYPE_I) {
6462 opcode = OP_ATOMIC_CAS_I8;
6463 f2i_opcode = OP_MOVE_F_TO_I8;
6464 i2f_opcode = OP_MOVE_I8_TO_F;
6467 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6468 opcode = OP_ATOMIC_CAS_I4;
6469 cfg->has_atomic_cas_i4 = TRUE;
6475 if (!mono_arch_opcode_supported (opcode))
6479 /* TODO: Decompose these opcodes instead of bailing here. */
6480 if (COMPILE_SOFT_FLOAT (cfg))
6483 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6484 f2i_new->dreg = mono_alloc_ireg (cfg);
6485 f2i_new->sreg1 = args [1]->dreg;
6486 if (f2i_opcode == OP_MOVE_F_TO_I4)
6487 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6488 MONO_ADD_INS (cfg->cbb, f2i_new);
6490 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6491 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6492 f2i_cmp->sreg1 = args [2]->dreg;
6493 if (f2i_opcode == OP_MOVE_F_TO_I4)
6494 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6495 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6498 MONO_INST_NEW (cfg, ins, opcode);
6499 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6500 ins->sreg1 = args [0]->dreg;
6501 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6502 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6503 MONO_ADD_INS (cfg->cbb, ins);
6505 switch (fsig->params [1]->type) {
6507 ins->type = STACK_I4;
6510 ins->type = STACK_I8;
6513 #if SIZEOF_REGISTER == 8
6514 ins->type = STACK_I8;
6516 ins->type = STACK_I4;
6520 ins->type = cfg->r4_stack_type;
6523 ins->type = STACK_R8;
6526 g_assert (mini_type_is_reference (fsig->params [1]));
6527 ins->type = STACK_OBJ;
6532 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6533 i2f->dreg = mono_alloc_freg (cfg);
6534 i2f->sreg1 = ins->dreg;
6535 i2f->type = STACK_R8;
6536 if (i2f_opcode == OP_MOVE_I4_TO_F)
6537 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6538 MONO_ADD_INS (cfg->cbb, i2f);
6543 if (cfg->gen_write_barriers && is_ref)
6544 emit_write_barrier (cfg, args [0], args [1]);
6546 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6547 fsig->params [1]->type == MONO_TYPE_I4) {
6548 MonoInst *cmp, *ceq;
6550 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6553 /* int32 r = CAS (location, value, comparand); */
6554 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6555 ins->dreg = alloc_ireg (cfg);
6556 ins->sreg1 = args [0]->dreg;
6557 ins->sreg2 = args [1]->dreg;
6558 ins->sreg3 = args [2]->dreg;
6559 ins->type = STACK_I4;
6560 MONO_ADD_INS (cfg->cbb, ins);
6562 /* bool result = r == comparand; */
6563 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6564 cmp->sreg1 = ins->dreg;
6565 cmp->sreg2 = args [2]->dreg;
6566 cmp->type = STACK_I4;
6567 MONO_ADD_INS (cfg->cbb, cmp);
6569 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6570 ceq->dreg = alloc_ireg (cfg);
6571 ceq->type = STACK_I4;
6572 MONO_ADD_INS (cfg->cbb, ceq);
6574 /* *success = result; */
6575 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6577 cfg->has_atomic_cas_i4 = TRUE;
6579 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6580 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6584 } else if (cmethod->klass->image == mono_defaults.corlib &&
6585 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6586 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6589 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6591 MonoType *t = fsig->params [0];
6593 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6595 g_assert (t->byref);
6596 /* t is a byref type, so the reference check is more complicated */
6597 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6598 if (t->type == MONO_TYPE_I1)
6599 opcode = OP_ATOMIC_LOAD_I1;
6600 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6601 opcode = OP_ATOMIC_LOAD_U1;
6602 else if (t->type == MONO_TYPE_I2)
6603 opcode = OP_ATOMIC_LOAD_I2;
6604 else if (t->type == MONO_TYPE_U2)
6605 opcode = OP_ATOMIC_LOAD_U2;
6606 else if (t->type == MONO_TYPE_I4)
6607 opcode = OP_ATOMIC_LOAD_I4;
6608 else if (t->type == MONO_TYPE_U4)
6609 opcode = OP_ATOMIC_LOAD_U4;
6610 else if (t->type == MONO_TYPE_R4)
6611 opcode = OP_ATOMIC_LOAD_R4;
6612 else if (t->type == MONO_TYPE_R8)
6613 opcode = OP_ATOMIC_LOAD_R8;
6614 #if SIZEOF_REGISTER == 8
6615 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6616 opcode = OP_ATOMIC_LOAD_I8;
6617 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6618 opcode = OP_ATOMIC_LOAD_U8;
6620 else if (t->type == MONO_TYPE_I)
6621 opcode = OP_ATOMIC_LOAD_I4;
6622 else if (is_ref || t->type == MONO_TYPE_U)
6623 opcode = OP_ATOMIC_LOAD_U4;
6627 if (!mono_arch_opcode_supported (opcode))
6630 MONO_INST_NEW (cfg, ins, opcode);
6631 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6632 ins->sreg1 = args [0]->dreg;
6633 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6634 MONO_ADD_INS (cfg->cbb, ins);
6637 case MONO_TYPE_BOOLEAN:
6644 ins->type = STACK_I4;
6648 ins->type = STACK_I8;
6652 #if SIZEOF_REGISTER == 8
6653 ins->type = STACK_I8;
6655 ins->type = STACK_I4;
6659 ins->type = cfg->r4_stack_type;
6662 ins->type = STACK_R8;
6666 ins->type = STACK_OBJ;
6672 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6674 MonoType *t = fsig->params [0];
6677 g_assert (t->byref);
6678 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6679 if (t->type == MONO_TYPE_I1)
6680 opcode = OP_ATOMIC_STORE_I1;
6681 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6682 opcode = OP_ATOMIC_STORE_U1;
6683 else if (t->type == MONO_TYPE_I2)
6684 opcode = OP_ATOMIC_STORE_I2;
6685 else if (t->type == MONO_TYPE_U2)
6686 opcode = OP_ATOMIC_STORE_U2;
6687 else if (t->type == MONO_TYPE_I4)
6688 opcode = OP_ATOMIC_STORE_I4;
6689 else if (t->type == MONO_TYPE_U4)
6690 opcode = OP_ATOMIC_STORE_U4;
6691 else if (t->type == MONO_TYPE_R4)
6692 opcode = OP_ATOMIC_STORE_R4;
6693 else if (t->type == MONO_TYPE_R8)
6694 opcode = OP_ATOMIC_STORE_R8;
6695 #if SIZEOF_REGISTER == 8
6696 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6697 opcode = OP_ATOMIC_STORE_I8;
6698 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6699 opcode = OP_ATOMIC_STORE_U8;
6701 else if (t->type == MONO_TYPE_I)
6702 opcode = OP_ATOMIC_STORE_I4;
6703 else if (is_ref || t->type == MONO_TYPE_U)
6704 opcode = OP_ATOMIC_STORE_U4;
6708 if (!mono_arch_opcode_supported (opcode))
6711 MONO_INST_NEW (cfg, ins, opcode);
6712 ins->dreg = args [0]->dreg;
6713 ins->sreg1 = args [1]->dreg;
6714 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6715 MONO_ADD_INS (cfg->cbb, ins);
6717 if (cfg->gen_write_barriers && is_ref)
6718 emit_write_barrier (cfg, args [0], args [1]);
6724 } else if (cmethod->klass->image == mono_defaults.corlib &&
6725 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6726 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6727 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6728 if (should_insert_brekpoint (cfg->method)) {
6729 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6731 MONO_INST_NEW (cfg, ins, OP_NOP);
6732 MONO_ADD_INS (cfg->cbb, ins);
6736 } else if (cmethod->klass->image == mono_defaults.corlib &&
6737 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6738 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6739 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6741 EMIT_NEW_ICONST (cfg, ins, 1);
6743 EMIT_NEW_ICONST (cfg, ins, 0);
6746 } else if (cmethod->klass->image == mono_defaults.corlib &&
6747 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6748 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6749 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6750 /* No stack walks are currently available, so implement this as an intrinsic */
6751 MonoInst *assembly_ins;
6753 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6754 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6757 } else if (cmethod->klass->image == mono_defaults.corlib &&
6758 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6759 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6760 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6761 /* No stack walks are currently available, so implement this as an intrinsic */
6762 MonoInst *method_ins;
6763 MonoMethod *declaring = cfg->method;
6765 /* This returns the declaring generic method */
6766 if (declaring->is_inflated)
6767 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6768 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6769 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6770 cfg->no_inline = TRUE;
6771 if (cfg->method != cfg->current_method)
6772 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6775 } else if (cmethod->klass == mono_defaults.math_class) {
6777 * There is general branchless code for Min/Max, but it does not work for
6779 * http://everything2.com/?node_id=1051618
6781 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6782 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6783 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6784 !strcmp (cmethod->klass->name, "Selector")) ||
6785 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6786 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6787 !strcmp (cmethod->klass->name, "Selector"))
6789 if (cfg->backend->have_objc_get_selector &&
6790 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6791 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6792 cfg->compile_aot && !cfg->llvm_only) {
6794 MonoJumpInfoToken *ji;
6799 cfg->exception_message = g_strdup ("GetHandle");
6800 cfg->disable_llvm = TRUE;
6802 if (args [0]->opcode == OP_GOT_ENTRY) {
6803 pi = (MonoInst *)args [0]->inst_p1;
6804 g_assert (pi->opcode == OP_PATCH_INFO);
6805 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6806 ji = (MonoJumpInfoToken *)pi->inst_p0;
6808 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6809 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6812 NULLIFY_INS (args [0]);
6815 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6816 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6817 ins->dreg = mono_alloc_ireg (cfg);
6819 ins->inst_p0 = mono_string_to_utf8 (s);
6820 MONO_ADD_INS (cfg->cbb, ins);
6825 #ifdef MONO_ARCH_SIMD_INTRINSICS
6826 if (cfg->opt & MONO_OPT_SIMD) {
6827 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6833 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6837 if (COMPILE_LLVM (cfg)) {
6838 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6843 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6847 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-level replacement when one applies.
 * The only visible redirection here is String.InternalAllocateStr, which is
 * rerouted to the managed GC allocator when allocation profiling and
 * MONO_OPT_SHARED are both disabled.  Returns the emitted call, or
 * presumably NULL when no redirection applies (fallthrough path elided
 * here -- TODO confirm against full source).
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);

			/* Call the managed allocator as (vtable, length) instead of the icall */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Spill the inline-call arguments on the stack SP into newly created
 * local variables, recording them in cfg->args so the inlined body can
 * load them with ARGLOAD.  For instance calls the 'this' argument type is
 * derived from the stack entry rather than the signature.
 */
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
	MonoInst *store, *temp;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
6901 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6902 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6904 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter (enabled via MONO_INLINE_CALLED_LIMITED_METHODS): only allow
 * inlining of CALLED_METHOD when its full name starts with the value of the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The env var is
 * read once and cached in a static.
 */
check_inline_called_method_name_limit (MonoMethod *called_method)
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		/* prefix match against the configured limit */
		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
6934 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug filter (enabled via MONO_INLINE_CALLER_LIMITED_METHODS): only allow
 * inlining into CALLER_METHOD when its full name starts with the value of the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  Mirrors
 * check_inline_called_method_name_limit () for the caller side.
 */
check_inline_caller_method_name_limit (MonoMethod *caller_method)
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		/* prefix match against the configured limit */
		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR to initialize the vreg DREG with the zero/default value of RTYPE:
 * NULL for pointers and references, 0 for integer types, 0.0 for R4/R8
 * (loaded from static storage so the constant has an address), and VZERO
 * for value types.  Used e.g. to give the inline return variable a defined
 * value on paths that never set it.
 */
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	/* statics so OP_R4CONST/OP_R8CONST can point inst_p0 at them */
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;

	rtype = mini_get_underlying_type (rtype);

		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* r4fp: keep R4 values in single precision instead of widening to R8 */
		MONO_INST_NEW (cfg, ins, OP_R4CONST);
		ins->type = STACK_R4;
		ins->inst_p0 = (void*)&r4_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		/* generic parameter constrained to a value type */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* initializations that keep the
 * IR well-formed (the vreg has a def) without generating real code.  Falls
 * back to emit_init_rvar () for types with no dummy opcode.
 */
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	rtype = mini_get_underlying_type (rtype);

		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
		/* no dummy opcode for this type, emit a real init */
		emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  If INIT is FALSE, emit
 * dummy initialization statements to keep the IR valid.  Under soft-float,
 * the value is materialized into a fresh vreg and stored through LOCSTORE
 * instead of writing the local's dreg directly.
 */
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
	MonoInst *var = cfg->locals [local];
	if (COMPILE_SOFT_FLOAT (cfg)) {
		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
		emit_init_rvar (cfg, reg, type);
		/* store the just-emitted value (cfg->cbb->last_ins) into the local */
		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
		emit_init_rvar (cfg, var->dreg, type);
		emit_dummy_init_rvar (cfg, var->dreg, type);
 * Return the cost of inlining CMETHOD, or a negative value if the inline
 * was aborted (in which case all newly created bblocks are discarded).
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, gboolean inline_always)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

	/* optional debug filters driven by environment variables */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))

	fsig = mono_method_signature (cmethod);

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;

	/* allocate local variables */
	cheader = mono_method_get_header_checked (cmethod, &error);
	if (inline_always) {
		/* header failure is fatal only when the inline is mandatory */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
		mono_error_move (&cfg->error, &error);
	mono_error_cleanup (&error);

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);

	prev_locals = cfg->locals;
	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* save the parts of cfg that mono_method_to_ir () will repoint at the inlinee */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;
	prev_disable_inline = cfg->disable_inline;

	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))

	/* recursively convert the inlinee's IL into IR between sbblock and ebblock */
	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);

	ret_var_set = cfg->ret_var_set;

	/* restore the caller's compile state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->disable_inline = prev_disable_inline;
	cfg->inline_depth --;

	/* accept if cheap enough, mandatory, or marked AggressiveInlining */
	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];

			if (prev->next_bb == ebblock) {
				mono_merge_basic_blocks (cfg, prev, ebblock);
				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
					mono_merge_basic_blocks (cfg, prev_cbb, prev);
					cfg->cbb = prev_cbb;
			/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */

		 * It's possible that the rvar is set in some prev bblock, but not in others.
			for (i = 0; i < ebblock->in_count; ++i) {
				bb = ebblock->in_bb [i];

				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
					/* unreachable predecessor: give rvar a defined value anyway */
					emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
				emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		/* inline rejected: undo and report */
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7259 * Some of these comments may well be out-of-date.
7260 * Design decisions: we do a single pass over the IL code (and we do bblock
7261 * splitting/merging in the few cases when it's required: a back jump to an IL
7262 * address that was not already seen as bblock starting point).
7263 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7264 * Complex operations are decomposed in simpler ones right away. We need to let the
7265 * arch-specific code peek and poke inside this process somehow (except when the
7266 * optimizations can take advantage of the full semantic info of coarse opcodes).
7267 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7268 * MonoInst->opcode initially is the IL opcode or some simplification of that
7269 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7270 * opcode with value bigger than OP_LAST.
7271 * At this point the IR can be handed over to an interpreter, a dumb code generator
7272 * or to the optimizing code generator that will translate it to SSA form.
7274 * Profiling directed optimizations.
7275 * We may compile by default with few or no optimizations and instrument the code
7276 * or the user may indicate what methods to optimize the most either in a config file
7277 * or through repeated runs where the compiler applies offline the optimizations to
7278 * each method and then decides if it was worth it.
7281 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7282 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7283 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7284 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7285 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7286 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7287 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7288 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7290 /* offset from br.s -> br like opcodes */
7291 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to bblock BB, i.e. no other
 * bblock starts at that offset.
 */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];

	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a bblock (via GET_BBLOCK) at
 * every branch target and at the instruction following each branch, so the
 * method body can later be converted one bblock at a time.  Bblocks that
 * contain a CEE_THROW are flagged out_of_line.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);

		opcode = &mono_opcodes [i];
		/* advance ip past the operand; record bblocks for branch targets */
		switch (opcode->argument) {
		case MonoInlineNone:
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoShortInlineR:
		case MonoShortInlineVar:
		case MonoShortInlineI:
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineBrTarget:
			/* 4-byte signed displacement relative to the next instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			/* skip the count and the n 32-bit targets */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
			g_assert_not_reached ();

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of M to a MonoMethod, allowing open
 * constructed types.  For wrapper methods the method comes from the wrapper
 * data and is inflated with CONTEXT; otherwise it is looked up in M's image.
 * Errors are reported through ERROR.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
	mono_error_init (error);

	if (m->wrapper_type != MONO_WRAPPER_NONE) {
		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
		method = mono_class_inflate_generic_method_checked (method, context, error);

	method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but reject methods with open
 * constructed types when not compiling gshared code (recording a bad-image
 * error on CFG).  CFG may be NULL, in which case errors are swallowed.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);

	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");

	if (!method && !cfg)
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoClass, inflating it with
 * CONTEXT.  Wrapper methods take the class from the wrapper data.  The class
 * is initialized before being returned; lookup errors are swallowed (FIXME).
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
		klass = mono_class_inflate_generic_class_checked (klass, context, &error);
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
		klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
	mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in the context of METHOD to a MonoMethodSignature and
 * inflate it with CONTEXT.  Wrapper methods take the signature from the
 * wrapper data; otherwise it is parsed from metadata.
 */
static inline MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
	MonoMethodSignature *fsig;

	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
		fsig = mono_metadata_parse_signature (method->klass->image, token);

	fsig = mono_inflate_generic_signature(fsig, context, &error);
	/* inflation is expected to succeed here */
	g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (lazily caching in a static) the SecurityManager.ThrowException
 * method, used to raise CoreCLR security exceptions from generated code.
 */
throw_exception (void)
	static MonoMethod *method = NULL;

	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), raising the given
 * pre-built exception object at runtime.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
	MonoMethod *thrower = throw_exception ();

	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
get_original_method (MonoMethod *method)
	if (method->wrapper_type == MONO_WRAPPER_NONE)

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER may not access FIELD, emit code that throws
 * a security exception at runtime.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that throws
 * a security exception at runtime.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/*
	 * Pattern being matched (after the newarr that produced the array):
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);

		mono_error_cleanup (&error); /* FIXME don't swallow the error */

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* the call must really be RuntimeHelpers.InitializeArray from corlib */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		/* reject if the array payload would overrun the field's data */
		if (size > mono_type_size (field->type, &dummy_align))

		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!image_is_dynamic (method->klass->image)) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return (const char *)GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD, including a disassembly of the offending instruction when the
 * header can be parsed.
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
	char *method_fname = mono_method_full_name (method, TRUE);

	MonoMethodHeader *header = mono_method_get_header_checked (method, &error);

		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
		mono_error_cleanup (&error);
	} else if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
	g_free (method_fname);
	g_free (method_code);
	/* header ownership moves to cfg; freed when the compile finishes */
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N.  When the previous
 * instruction is the constant being stored, retarget its dreg to the local
 * instead of emitting a separate move.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7654 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for LDLOCA: when the very next opcode is INITOBJ in the same
 * bblock, initialize the local directly and avoid taking its address
 * (taking the address would block many later optimizations).
 * NOTE(review): interior lines (return paths etc.) are elided here.
 */
7657 static inline unsigned char *
7658 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7668 local = read16 (ip + 2);
7672 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7673 /* From the INITOBJ case */
7674 token = read32 (ip + 2);
7675 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7676 CHECK_TYPELOAD (klass);
7677 type = mini_get_underlying_type (&klass->byval_arg);
/* Initialize the local in place, replacing ldloca + initobj. */
7678 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode.
 * In this mode vtable/IMT slots hold function descriptors — an
 * <address, extra-arg> pair stored in two pointer-sized words — rather
 * than raw code addresses or trampolines, so every path below ends in a
 * calli through such a descriptor.  Handled cases, fastest first:
 * plain virtual call, simple interface call, generic-virtual /
 * variant-interface call, and a fully generic gsharedvt fallback.
 * NOTE(review): interior lines are elided in this excerpt.
 */
7686 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7688 MonoInst *icall_args [16];
7689 MonoInst *call_target, *ins, *vtable_ins;
7690 int arg_reg, this_reg, vtable_reg;
7691 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7692 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7693 gboolean variant_iface = FALSE;
7698 * In llvm-only mode, vtables contain function descriptors instead of
7699 * method addresses/trampolines.
/* Explicit null check on 'this' since the load below would not fault in llvm-only mode. */
7701 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use an IMT slot; class methods use a vtable index. */
7704 slot = mono_method_get_imt_slot (cmethod);
7706 slot = mono_method_get_vtable_index (cmethod);
7708 this_reg = sp [0]->dreg;
7710 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7711 variant_iface = TRUE;
/* Case 1: plain (non-generic, non-interface, non-gsharedvt) virtual call. */
7713 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7715 * The simplest case, a normal virtual call.
7717 int slot_reg = alloc_preg (cfg);
7718 int addr_reg = alloc_preg (cfg);
7719 int arg_reg = alloc_preg (cfg);
7720 MonoBasicBlock *non_null_bb;
7722 vtable_reg = alloc_preg (cfg);
7723 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7724 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7726 /* Load the vtable slot, which contains a function descriptor. */
7727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7729 NEW_BBLOCK (cfg, non_null_bb);
/* Slot already initialized (non-null) is the expected fast path. */
7731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7732 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: lazily initialize the vtable slot via an icall. */
7736 // FIXME: Make the wrapper use the preserveall cconv
7737 // FIXME: Use one icall per slot for small slot numbers ?
7738 icall_args [0] = vtable_ins;
7739 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7740 /* Make the icall return the vtable slot value to save some code space */
7741 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7742 ins->dreg = slot_reg;
7743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7746 MONO_START_BB (cfg, non_null_bb);
7747 /* Load the address + arg from the vtable slot */
7748 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7751 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (non-generic, non-variant) interface call via IMT. */
7754 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7756 * A simple interface call
7758 * We make a call through an imt slot to obtain the function descriptor we need to call.
7759 * The imt slot contains a function descriptor for a runtime function + arg.
7761 int slot_reg = alloc_preg (cfg);
7762 int addr_reg = alloc_preg (cfg);
7763 int arg_reg = alloc_preg (cfg);
7764 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7766 vtable_reg = alloc_preg (cfg);
7767 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets from the vtable pointer. */
7768 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7771 * The slot is already initialized when the vtable is created so there is no need
7775 /* Load the imt slot, which contains a function descriptor. */
7776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7778 /* Load the address + arg of the imt thunk from the imt slot */
7779 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7780 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7782 * IMT thunks in llvm-only mode are C functions which take an info argument
7783 * plus the imt method and return the ftndesc to call.
7785 icall_args [0] = thunk_arg_ins;
7786 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7787 cmethod, MONO_RGCTX_INFO_METHOD);
7788 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7790 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic-virtual or variant-interface call through an IMT thunk
 * that can return NULL for unseen instantiations. */
7793 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7795 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7796 * dynamically extended as more instantiations are discovered.
7797 * This handles generic virtual methods both on classes and interfaces.
7799 int slot_reg = alloc_preg (cfg);
7800 int addr_reg = alloc_preg (cfg);
7801 int arg_reg = alloc_preg (cfg);
7802 int ftndesc_reg = alloc_preg (cfg);
7803 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7804 MonoBasicBlock *slowpath_bb, *end_bb;
7806 NEW_BBLOCK (cfg, slowpath_bb);
7807 NEW_BBLOCK (cfg, end_bb);
7809 vtable_reg = alloc_preg (cfg);
7810 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7812 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7814 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7816 /* Load the slot, which contains a function descriptor. */
7817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7819 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7820 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7821 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7822 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7825 /* Same as with iface calls */
7826 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7827 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7828 icall_args [0] = thunk_arg_ins;
7829 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7830 cmethod, MONO_RGCTX_INFO_METHOD);
7831 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7832 ftndesc_ins->dreg = ftndesc_reg;
7834 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7835 * they don't know about yet. Fall back to the slowpath in that case.
7837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7843 MONO_START_BB (cfg, slowpath_bb);
7844 icall_args [0] = vtable_ins;
7845 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7846 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7847 cmethod, MONO_RGCTX_INFO_METHOD);
7849 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7851 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7852 ftndesc_ins->dreg = ftndesc_reg;
7853 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7856 MONO_START_BB (cfg, end_bb);
7857 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7861 * Non-optimized cases
/* Case 4 (gsharedvt fallback): resolve the call target via an icall that
 * also returns the extra argument through an out parameter. */
7863 icall_args [0] = sp [0];
7864 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7866 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7867 cmethod, MONO_RGCTX_INFO_METHOD);
7869 arg_reg = alloc_preg (cfg);
7870 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7871 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7873 g_assert (is_gsharedvt);
7875 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7877 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7880 * Pass the extra argument even if the callee doesn't receive it, most
7881 * calling conventions allow this.
7883 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, walking
 * the parent chain (the loop/return lines are elided in this excerpt).
 */
7887 is_exception_class (MonoClass *klass)
7890 if (klass == mono_defaults.exception_class)
7892 klass = klass->parent;
7898 * is_jit_optimizer_disabled:
7900 * Determine whenever M's assembly has a DebuggableAttribute with the
7901 * IsJITOptimizerDisabled flag set.
7904 is_jit_optimizer_disabled (MonoMethod *m)
7907 MonoAssembly *ass = m->klass->image->assembly;
7908 MonoCustomAttrInfo* attrs;
7911 gboolean val = FALSE;
/* Fast path: the result is cached per assembly. */
7914 if (ass->jit_optimizer_disabled_inited)
7915 return ass->jit_optimizer_disabled;
7917 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type not available: optimizer cannot be disabled. */
7921 ass->jit_optimizer_disabled = FALSE;
/* Publish the cached value before setting the inited flag. */
7922 mono_memory_barrier ();
7923 ass->jit_optimizer_disabled_inited = TRUE;
7927 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7928 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan assembly-level attributes for DebuggableAttribute. */
7930 for (i = 0; i < attrs->num_attrs; ++i) {
7931 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7933 MonoMethodSignature *sig;
7935 if (!attr->ctor || attr->ctor->klass != klass)
7937 /* Decode the attribute. See reflection.c */
7938 p = (const char*)attr->data;
/* Custom attribute blobs begin with the 0x0001 prolog. */
7939 g_assert (read16 (p) == 0x0001);
7942 // FIXME: Support named parameters
7943 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7944 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7946 /* Two boolean arguments */
7950 mono_custom_attrs_free (attrs);
/* Cache the decoded result; barrier orders value before inited flag. */
7953 ass->jit_optimizer_disabled = val;
7954 mono_memory_barrier ();
7955 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD (signature FSIG,
 * invoked via CALL_OPCODE) can be emitted.  Starts from the arch-specific
 * check, then vetoes any case where the callee could end up holding a
 * pointer into the caller's stack frame after that frame is torn down.
 */
7961 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7963 gboolean supported_tail_call;
7966 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7968 for (i = 0; i < fsig->param_count; ++i) {
7969 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7970 /* These can point to the current method's stack */
7971 supported_tail_call = FALSE;
7973 if (fsig->hasthis && cmethod->klass->valuetype)
7974 /* this might point to the current method's stack */
7975 supported_tail_call = FALSE;
7976 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7977 supported_tail_call = FALSE;
7978 if (cfg->method->save_lmf)
7979 supported_tail_call = FALSE;
/* Wrappers (other than dynamic-method ones) are not tail-callable. */
7980 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7981 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are supported. */
7982 if (call_opcode != CEE_CALL)
7983 supported_tail_call = FALSE;
7985 /* Debugging support */
7987 if (supported_tail_call) {
7988 if (!mono_debug_count ())
7989 supported_tail_call = FALSE;
7993 return supported_tail_call;
7999 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation for a NEWOBJ opcode.  SP holds the
 * freshly allocated 'this' followed by the ctor arguments.  Depending on
 * generic sharing, gsharedvt and optimization flags the call becomes an
 * intrinsic, an inlined body, an indirect calli carrying an rgctx/vtable
 * argument, or a plain direct call.  *INLINE_COSTS is bumped on successful
 * inlining.  NOTE(review): interior lines are elided in this excerpt.
 */
8002 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8003 MonoInst **sp, guint8 *ip, int *inline_costs)
8005 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an extra vtable/mrgctx argument. */
8007 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8008 mono_method_is_generic_sharable (cmethod, TRUE)) {
8009 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8010 mono_class_vtable (cfg->domain, cmethod->klass);
8011 CHECK_TYPELOAD (cmethod->klass);
8013 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8014 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8017 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8018 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8020 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8022 CHECK_TYPELOAD (cmethod->klass);
8023 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8028 /* Avoid virtual calls to ctors if possible */
8029 if (mono_class_is_marshalbyref (cmethod->klass))
8030 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first. */
8032 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8033 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8034 CHECK_CFG_EXCEPTION;
/* Inline the ctor body (not for Exception subclasses). */
8035 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8036 mono_method_check_inlining (cfg, cmethod) &&
8037 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8040 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8041 cfg->real_offset += 5;
8043 *inline_costs += costs - 5;
8045 INLINE_FAILURE ("inline failure");
8046 // FIXME-VT: Clean this up
8047 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8048 GSHAREDVT_FAILURE(*ip);
8049 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through an out trampoline via calli. */
8051 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8054 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8056 if (cfg->llvm_only) {
8057 // FIXME: Avoid initializing vtable_arg
8058 emit_llvmonly_calli (cfg, fsig, sp, addr);
8060 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Shared-context call that cannot be patched: use an indirect call. */
8062 } else if (context_used &&
8063 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8064 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8065 MonoInst *cmethod_addr;
8067 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8069 if (cfg->llvm_only) {
8070 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8071 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8072 emit_llvmonly_calli (cfg, fsig, sp, addr);
8074 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8075 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8077 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a direct (non-inlined) call to the ctor. */
8080 INLINE_FAILURE ("ctor call");
8081 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8082 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR that returns VAL from the method being compiled.  Valuetype
 * returns (STOBJ) are either stored into the return variable or written
 * through the hidden vret address argument; on soft-float targets an R4
 * return is first converted via an icall before the arch-specific setret.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
8089 emit_setret (MonoCompile *cfg, MonoInst *val)
8091 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
8094 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden vret argument: store into the compiler-managed ret var. */
8097 if (!cfg->vret_addr) {
8098 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Otherwise write the valuetype through the vret address. */
8100 EMIT_NEW_RETLOADA (cfg, ret_addr);
8102 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8103 ins->klass = mono_class_from_mono_type (ret_type);
8106 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall before returning. */
8107 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8108 MonoInst *iargs [1];
8112 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8113 mono_arch_emit_setret (cfg, cfg->method, conv);
8115 mono_arch_emit_setret (cfg, cfg->method, val);
8118 mono_arch_emit_setret (cfg, cfg->method, val);
8124 * mono_method_to_ir:
8126 * Translate the .net IL into linear IR.
8129 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8130 MonoInst *return_var, MonoInst **inline_args,
8131 guint inline_offset, gboolean is_virtual_call)
8134 MonoInst *ins, **sp, **stack_start;
8135 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8136 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8137 MonoMethod *cmethod, *method_definition;
8138 MonoInst **arg_array;
8139 MonoMethodHeader *header;
8141 guint32 token, ins_flag;
8143 MonoClass *constrained_class = NULL;
8144 unsigned char *ip, *end, *target, *err_pos;
8145 MonoMethodSignature *sig;
8146 MonoGenericContext *generic_context = NULL;
8147 MonoGenericContainer *generic_container = NULL;
8148 MonoType **param_types;
8149 int i, n, start_new_bblock, dreg;
8150 int num_calls = 0, inline_costs = 0;
8151 int breakpoint_id = 0;
8153 GSList *class_inits = NULL;
8154 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8156 gboolean init_locals, seq_points, skip_dead_blocks;
8157 gboolean sym_seq_points = FALSE;
8158 MonoDebugMethodInfo *minfo;
8159 MonoBitSet *seq_point_locs = NULL;
8160 MonoBitSet *seq_point_set_locs = NULL;
8162 cfg->disable_inline = is_jit_optimizer_disabled (method);
8164 /* serialization and xdomain stuff may need access to private fields and methods */
8165 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8166 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8167 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8168 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8169 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8170 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8172 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8173 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8174 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8175 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8176 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8178 image = method->klass->image;
8179 header = mono_method_get_header_checked (method, &cfg->error);
8181 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8182 goto exception_exit;
8184 generic_container = mono_method_get_generic_container (method);
8185 sig = mono_method_signature (method);
8186 num_args = sig->hasthis + sig->param_count;
8187 ip = (unsigned char*)header->code;
8188 cfg->cil_start = ip;
8189 end = ip + header->code_size;
8190 cfg->stat_cil_code_size += header->code_size;
8192 seq_points = cfg->gen_seq_points && cfg->method == method;
8194 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8195 /* We could hit a seq point before attaching to the JIT (#8338) */
8199 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8200 minfo = mono_debug_lookup_method (method);
8202 MonoSymSeqPoint *sps;
8203 int i, n_il_offsets;
8205 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8206 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8207 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8208 sym_seq_points = TRUE;
8209 for (i = 0; i < n_il_offsets; ++i) {
8210 if (sps [i].il_offset < header->code_size)
8211 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8214 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8215 /* Methods without line number info like auto-generated property accessors */
8216 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8217 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8218 sym_seq_points = TRUE;
8223 * Methods without init_locals set could cause asserts in various passes
8224 * (#497220). To work around this, we emit dummy initialization opcodes
8225 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8226 * on some platforms.
8228 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8229 init_locals = header->init_locals;
8233 method_definition = method;
8234 while (method_definition->is_inflated) {
8235 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8236 method_definition = imethod->declaring;
8239 /* SkipVerification is not allowed if core-clr is enabled */
8240 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8242 dont_verify_stloc = TRUE;
8245 if (sig->is_inflated)
8246 generic_context = mono_method_get_context (method);
8247 else if (generic_container)
8248 generic_context = &generic_container->context;
8249 cfg->generic_context = generic_context;
8252 g_assert (!sig->has_type_parameters);
8254 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8255 g_assert (method->is_inflated);
8256 g_assert (mono_method_get_context (method)->method_inst);
8258 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8259 g_assert (sig->generic_param_count);
8261 if (cfg->method == method) {
8262 cfg->real_offset = 0;
8264 cfg->real_offset = inline_offset;
8267 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8268 cfg->cil_offset_to_bb_len = header->code_size;
8270 cfg->current_method = method;
8272 if (cfg->verbose_level > 2)
8273 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8275 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8277 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8278 for (n = 0; n < sig->param_count; ++n)
8279 param_types [n + sig->hasthis] = sig->params [n];
8280 cfg->arg_types = param_types;
8282 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8283 if (cfg->method == method) {
8285 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8286 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8289 NEW_BBLOCK (cfg, start_bblock);
8290 cfg->bb_entry = start_bblock;
8291 start_bblock->cil_code = NULL;
8292 start_bblock->cil_length = 0;
8295 NEW_BBLOCK (cfg, end_bblock);
8296 cfg->bb_exit = end_bblock;
8297 end_bblock->cil_code = NULL;
8298 end_bblock->cil_length = 0;
8299 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8300 g_assert (cfg->num_bblocks == 2);
8302 arg_array = cfg->args;
8304 if (header->num_clauses) {
8305 cfg->spvars = g_hash_table_new (NULL, NULL);
8306 cfg->exvars = g_hash_table_new (NULL, NULL);
8308 /* handle exception clauses */
8309 for (i = 0; i < header->num_clauses; ++i) {
8310 MonoBasicBlock *try_bb;
8311 MonoExceptionClause *clause = &header->clauses [i];
8312 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8314 try_bb->real_offset = clause->try_offset;
8315 try_bb->try_start = TRUE;
8316 try_bb->region = ((i + 1) << 8) | clause->flags;
8317 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8318 tblock->real_offset = clause->handler_offset;
8319 tblock->flags |= BB_EXCEPTION_HANDLER;
8322 * Linking the try block with the EH block hinders inlining as we won't be able to
8323 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8325 if (COMPILE_LLVM (cfg))
8326 link_bblock (cfg, try_bb, tblock);
8328 if (*(ip + clause->handler_offset) == CEE_POP)
8329 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8331 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8332 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8333 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8334 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8335 MONO_ADD_INS (tblock, ins);
8337 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8338 /* finally clauses already have a seq point */
8339 /* seq points for filter clauses are emitted below */
8340 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8341 MONO_ADD_INS (tblock, ins);
8344 /* todo: is a fault block unsafe to optimize? */
8345 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8346 tblock->flags |= BB_EXCEPTION_UNSAFE;
8349 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8351 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8353 /* catch and filter blocks get the exception object on the stack */
8354 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8355 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8357 /* mostly like handle_stack_args (), but just sets the input args */
8358 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8359 tblock->in_scount = 1;
8360 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8361 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8365 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8366 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8367 if (!cfg->compile_llvm) {
8368 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8369 ins->dreg = tblock->in_stack [0]->dreg;
8370 MONO_ADD_INS (tblock, ins);
8373 MonoInst *dummy_use;
8376 * Add a dummy use for the exvar so its liveness info will be
8379 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8382 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8383 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8384 MONO_ADD_INS (tblock, ins);
8387 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8388 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8389 tblock->flags |= BB_EXCEPTION_HANDLER;
8390 tblock->real_offset = clause->data.filter_offset;
8391 tblock->in_scount = 1;
8392 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8393 /* The filter block shares the exvar with the handler block */
8394 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8395 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8396 MONO_ADD_INS (tblock, ins);
8400 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8401 clause->data.catch_class &&
8403 mono_class_check_context_used (clause->data.catch_class)) {
8405 * In shared generic code with catch
8406 * clauses containing type variables
8407 * the exception handling code has to
8408 * be able to get to the rgctx.
8409 * Therefore we have to make sure that
8410 * the vtable/mrgctx argument (for
8411 * static or generic methods) or the
8412 * "this" argument (for non-static
8413 * methods) are live.
8415 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8416 mini_method_get_context (method)->method_inst ||
8417 method->klass->valuetype) {
8418 mono_get_vtable_var (cfg);
8420 MonoInst *dummy_use;
8422 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8427 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8428 cfg->cbb = start_bblock;
8429 cfg->args = arg_array;
8430 mono_save_args (cfg, sig, inline_args);
8433 /* FIRST CODE BLOCK */
8434 NEW_BBLOCK (cfg, tblock);
8435 tblock->cil_code = ip;
8439 ADD_BBLOCK (cfg, tblock);
8441 if (cfg->method == method) {
8442 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8443 if (breakpoint_id) {
8444 MONO_INST_NEW (cfg, ins, OP_BREAK);
8445 MONO_ADD_INS (cfg->cbb, ins);
8449 /* we use a separate basic block for the initialization code */
8450 NEW_BBLOCK (cfg, init_localsbb);
8451 cfg->bb_init = init_localsbb;
8452 init_localsbb->real_offset = cfg->real_offset;
8453 start_bblock->next_bb = init_localsbb;
8454 init_localsbb->next_bb = cfg->cbb;
8455 link_bblock (cfg, start_bblock, init_localsbb);
8456 link_bblock (cfg, init_localsbb, cfg->cbb);
8458 cfg->cbb = init_localsbb;
8460 if (cfg->gsharedvt && cfg->method == method) {
8461 MonoGSharedVtMethodInfo *info;
8462 MonoInst *var, *locals_var;
8465 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8466 info->method = cfg->method;
8467 info->count_entries = 16;
8468 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8469 cfg->gsharedvt_info = info;
8471 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8472 /* prevent it from being register allocated */
8473 //var->flags |= MONO_INST_VOLATILE;
8474 cfg->gsharedvt_info_var = var;
8476 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8479 /* Allocate locals */
8480 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8481 /* prevent it from being register allocated */
8482 //locals_var->flags |= MONO_INST_VOLATILE;
8483 cfg->gsharedvt_locals_var = locals_var;
8485 dreg = alloc_ireg (cfg);
8486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8488 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8489 ins->dreg = locals_var->dreg;
8491 MONO_ADD_INS (cfg->cbb, ins);
8492 cfg->gsharedvt_locals_var_ins = ins;
8494 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8497 ins->flags |= MONO_INST_INIT;
8501 if (mono_security_core_clr_enabled ()) {
8502 /* check if this is native code, e.g. an icall or a p/invoke */
8503 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8504 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8506 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8507 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8509 /* if this ia a native call then it can only be JITted from platform code */
8510 if ((icall || pinvk) && method->klass && method->klass->image) {
8511 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8512 MonoException *ex = icall ? mono_get_exception_security () :
8513 mono_get_exception_method_access ();
8514 emit_throw_exception (cfg, ex);
8521 CHECK_CFG_EXCEPTION;
8523 if (header->code_size == 0)
8526 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8531 if (cfg->method == method)
8532 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8534 for (n = 0; n < header->num_locals; ++n) {
8535 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8540 /* We force the vtable variable here for all shared methods
8541 for the possibility that they might show up in a stack
8542 trace where their exact instantiation is needed. */
8543 if (cfg->gshared && method == cfg->method) {
8544 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8545 mini_method_get_context (method)->method_inst ||
8546 method->klass->valuetype) {
8547 mono_get_vtable_var (cfg);
8549 /* FIXME: Is there a better way to do this?
8550 We need the variable live for the duration
8551 of the whole method. */
8552 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8556 /* add a check for this != NULL to inlined methods */
8557 if (is_virtual_call) {
8560 NEW_ARGLOAD (cfg, arg_ins, 0);
8561 MONO_ADD_INS (cfg->cbb, arg_ins);
8562 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8565 skip_dead_blocks = !dont_verify;
8566 if (skip_dead_blocks) {
8567 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8572 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8573 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8576 start_new_bblock = 0;
8578 if (cfg->method == method)
8579 cfg->real_offset = ip - header->code;
8581 cfg->real_offset = inline_offset;
8586 if (start_new_bblock) {
8587 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8588 if (start_new_bblock == 2) {
8589 g_assert (ip == tblock->cil_code);
8591 GET_BBLOCK (cfg, tblock, ip);
8593 cfg->cbb->next_bb = tblock;
8595 start_new_bblock = 0;
8596 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8597 if (cfg->verbose_level > 3)
8598 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8599 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8603 g_slist_free (class_inits);
8606 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8607 link_bblock (cfg, cfg->cbb, tblock);
8608 if (sp != stack_start) {
8609 handle_stack_args (cfg, stack_start, sp - stack_start);
8611 CHECK_UNVERIFIABLE (cfg);
8613 cfg->cbb->next_bb = tblock;
8615 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8616 if (cfg->verbose_level > 3)
8617 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8618 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8621 g_slist_free (class_inits);
8626 if (skip_dead_blocks) {
8627 int ip_offset = ip - header->code;
8629 if (ip_offset == bb->end)
8633 int op_size = mono_opcode_size (ip, end);
8634 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8636 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8638 if (ip_offset + op_size == bb->end) {
8639 MONO_INST_NEW (cfg, ins, OP_NOP);
8640 MONO_ADD_INS (cfg->cbb, ins);
8641 start_new_bblock = 1;
8649 * Sequence points are points where the debugger can place a breakpoint.
8650 * Currently, we generate these automatically at points where the IL
8653 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8655 * Make methods interruptible at the beginning, and at the targets of
8656 * backward branches.
8657 * Also, do this at the start of every bblock in methods with clauses too,
8658 * to be able to handle instructions with imprecise control flow like
8660 * Backward branches are handled at the end of method-to-ir ().
8662 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8663 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8665 /* Avoid sequence points on empty IL like .volatile */
8666 // FIXME: Enable this
8667 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8668 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8669 if ((sp != stack_start) && !sym_seq_point)
8670 ins->flags |= MONO_INST_NONEMPTY_STACK;
8671 MONO_ADD_INS (cfg->cbb, ins);
8674 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8677 cfg->cbb->real_offset = cfg->real_offset;
8679 if ((cfg->method == method) && cfg->coverage_info) {
8680 guint32 cil_offset = ip - header->code;
8681 cfg->coverage_info->data [cil_offset].cil_code = ip;
8683 /* TODO: Use an increment here */
8684 #if defined(TARGET_X86)
8685 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8686 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8688 MONO_ADD_INS (cfg->cbb, ins);
8690 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8695 if (cfg->verbose_level > 3)
8696 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8700 if (seq_points && !sym_seq_points && sp != stack_start) {
8702 * The C# compiler uses these nops to notify the JIT that it should
8703 * insert seq points.
8705 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8706 MONO_ADD_INS (cfg->cbb, ins);
8708 if (cfg->keep_cil_nops)
8709 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8711 MONO_INST_NEW (cfg, ins, OP_NOP);
8713 MONO_ADD_INS (cfg->cbb, ins);
8716 if (should_insert_brekpoint (cfg->method)) {
8717 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8719 MONO_INST_NEW (cfg, ins, OP_NOP);
8722 MONO_ADD_INS (cfg->cbb, ins);
8728 CHECK_STACK_OVF (1);
8729 n = (*ip)-CEE_LDARG_0;
8731 EMIT_NEW_ARGLOAD (cfg, ins, n);
8739 CHECK_STACK_OVF (1);
8740 n = (*ip)-CEE_LDLOC_0;
8742 EMIT_NEW_LOCLOAD (cfg, ins, n);
8751 n = (*ip)-CEE_STLOC_0;
8754 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8756 emit_stloc_ir (cfg, sp, header, n);
8763 CHECK_STACK_OVF (1);
8766 EMIT_NEW_ARGLOAD (cfg, ins, n);
8772 CHECK_STACK_OVF (1);
8775 NEW_ARGLOADA (cfg, ins, n);
8776 MONO_ADD_INS (cfg->cbb, ins);
8786 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8788 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8793 CHECK_STACK_OVF (1);
8796 EMIT_NEW_LOCLOAD (cfg, ins, n);
8800 case CEE_LDLOCA_S: {
8801 unsigned char *tmp_ip;
8803 CHECK_STACK_OVF (1);
8804 CHECK_LOCAL (ip [1]);
8806 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8812 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8821 CHECK_LOCAL (ip [1]);
8822 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8824 emit_stloc_ir (cfg, sp, header, ip [1]);
8829 CHECK_STACK_OVF (1);
8830 EMIT_NEW_PCONST (cfg, ins, NULL);
8831 ins->type = STACK_OBJ;
8836 CHECK_STACK_OVF (1);
8837 EMIT_NEW_ICONST (cfg, ins, -1);
8850 CHECK_STACK_OVF (1);
8851 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8857 CHECK_STACK_OVF (1);
8859 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8865 CHECK_STACK_OVF (1);
8866 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8872 CHECK_STACK_OVF (1);
8873 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8874 ins->type = STACK_I8;
8875 ins->dreg = alloc_dreg (cfg, STACK_I8);
8877 ins->inst_l = (gint64)read64 (ip);
8878 MONO_ADD_INS (cfg->cbb, ins);
8884 gboolean use_aotconst = FALSE;
8886 #ifdef TARGET_POWERPC
8887 /* FIXME: Clean this up */
8888 if (cfg->compile_aot)
8889 use_aotconst = TRUE;
8892 /* FIXME: we should really allocate this only late in the compilation process */
8893 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8895 CHECK_STACK_OVF (1);
8901 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8903 dreg = alloc_freg (cfg);
8904 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8905 ins->type = cfg->r4_stack_type;
8907 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8908 ins->type = cfg->r4_stack_type;
8909 ins->dreg = alloc_dreg (cfg, STACK_R8);
8911 MONO_ADD_INS (cfg->cbb, ins);
8921 gboolean use_aotconst = FALSE;
8923 #ifdef TARGET_POWERPC
8924 /* FIXME: Clean this up */
8925 if (cfg->compile_aot)
8926 use_aotconst = TRUE;
8929 /* FIXME: we should really allocate this only late in the compilation process */
8930 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8932 CHECK_STACK_OVF (1);
8938 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8940 dreg = alloc_freg (cfg);
8941 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8942 ins->type = STACK_R8;
8944 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8945 ins->type = STACK_R8;
8946 ins->dreg = alloc_dreg (cfg, STACK_R8);
8948 MONO_ADD_INS (cfg->cbb, ins);
8957 MonoInst *temp, *store;
8959 CHECK_STACK_OVF (1);
8963 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8964 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8966 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8969 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8982 if (sp [0]->type == STACK_R8)
8983 /* we need to pop the value from the x86 FP stack */
8984 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8989 MonoMethodSignature *fsig;
8992 INLINE_FAILURE ("jmp");
8993 GSHAREDVT_FAILURE (*ip);
8996 if (stack_start != sp)
8998 token = read32 (ip + 1);
8999 /* FIXME: check the signature matches */
9000 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9003 if (cfg->gshared && mono_method_check_context_used (cmethod))
9004 GENERIC_SHARING_FAILURE (CEE_JMP);
9006 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9008 fsig = mono_method_signature (cmethod);
9009 n = fsig->param_count + fsig->hasthis;
9010 if (cfg->llvm_only) {
9013 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9014 for (i = 0; i < n; ++i)
9015 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9016 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9018 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9019 * have to emit a normal return since llvm expects it.
9022 emit_setret (cfg, ins);
9023 MONO_INST_NEW (cfg, ins, OP_BR);
9024 ins->inst_target_bb = end_bblock;
9025 MONO_ADD_INS (cfg->cbb, ins);
9026 link_bblock (cfg, cfg->cbb, end_bblock);
9029 } else if (cfg->backend->have_op_tail_call) {
9030 /* Handle tail calls similarly to calls */
9033 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9034 call->method = cmethod;
9035 call->tail_call = TRUE;
9036 call->signature = mono_method_signature (cmethod);
9037 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9038 call->inst.inst_p0 = cmethod;
9039 for (i = 0; i < n; ++i)
9040 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9042 mono_arch_emit_call (cfg, call);
9043 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9044 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9046 for (i = 0; i < num_args; ++i)
9047 /* Prevent arguments from being optimized away */
9048 arg_array [i]->flags |= MONO_INST_VOLATILE;
9050 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9051 ins = (MonoInst*)call;
9052 ins->inst_p0 = cmethod;
9053 MONO_ADD_INS (cfg->cbb, ins);
9057 start_new_bblock = 1;
9062 MonoMethodSignature *fsig;
9065 token = read32 (ip + 1);
9069 //GSHAREDVT_FAILURE (*ip);
9074 fsig = mini_get_signature (method, token, generic_context);
9076 if (method->dynamic && fsig->pinvoke) {
9080 * This is a call through a function pointer using a pinvoke
9081 * signature. Have to create a wrapper and call that instead.
9082 * FIXME: This is very slow, need to create a wrapper at JIT time
9083 * instead based on the signature.
9085 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9086 EMIT_NEW_PCONST (cfg, args [1], fsig);
9088 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9091 n = fsig->param_count + fsig->hasthis;
9095 //g_assert (!virtual_ || fsig->hasthis);
9099 inline_costs += 10 * num_calls++;
9102 * Making generic calls out of gsharedvt methods.
9103 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9104 * patching gshared method addresses into a gsharedvt method.
9106 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9108 * We pass the address to the gsharedvt trampoline in the rgctx reg
9110 MonoInst *callee = addr;
9112 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9114 GSHAREDVT_FAILURE (*ip);
9118 GSHAREDVT_FAILURE (*ip);
9120 addr = emit_get_rgctx_sig (cfg, context_used,
9121 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9122 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9126 /* Prevent inlining of methods with indirect calls */
9127 INLINE_FAILURE ("indirect call");
9129 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9130 MonoJumpInfoType info_type;
9134 * Instead of emitting an indirect call, emit a direct call
9135 * with the contents of the aotconst as the patch info.
9137 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9138 info_type = (MonoJumpInfoType)addr->inst_c1;
9139 info_data = addr->inst_p0;
9141 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9142 info_data = addr->inst_right->inst_left;
9145 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9146 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9151 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9155 /* End of call, INS should contain the result of the call, if any */
9157 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9159 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9162 CHECK_CFG_EXCEPTION;
9166 constrained_class = NULL;
9170 case CEE_CALLVIRT: {
9171 MonoInst *addr = NULL;
9172 MonoMethodSignature *fsig = NULL;
9174 int virtual_ = *ip == CEE_CALLVIRT;
9175 gboolean pass_imt_from_rgctx = FALSE;
9176 MonoInst *imt_arg = NULL;
9177 MonoInst *keep_this_alive = NULL;
9178 gboolean pass_vtable = FALSE;
9179 gboolean pass_mrgctx = FALSE;
9180 MonoInst *vtable_arg = NULL;
9181 gboolean check_this = FALSE;
9182 gboolean supported_tail_call = FALSE;
9183 gboolean tail_call = FALSE;
9184 gboolean need_seq_point = FALSE;
9185 guint32 call_opcode = *ip;
9186 gboolean emit_widen = TRUE;
9187 gboolean push_res = TRUE;
9188 gboolean skip_ret = FALSE;
9189 gboolean delegate_invoke = FALSE;
9190 gboolean direct_icall = FALSE;
9191 gboolean constrained_partial_call = FALSE;
9192 MonoMethod *cil_method;
9195 token = read32 (ip + 1);
9199 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9202 cil_method = cmethod;
9204 if (constrained_class) {
9205 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9206 if (!mini_is_gsharedvt_klass (constrained_class)) {
9207 g_assert (!cmethod->klass->valuetype);
9208 if (!mini_type_is_reference (&constrained_class->byval_arg))
9209 constrained_partial_call = TRUE;
9213 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9214 if (cfg->verbose_level > 2)
9215 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9216 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9217 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9219 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9223 if (cfg->verbose_level > 2)
9224 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9226 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9228 * This is needed since get_method_constrained can't find
9229 * the method in klass representing a type var.
9230 * The type var is guaranteed to be a reference type in this
9233 if (!mini_is_gsharedvt_klass (constrained_class))
9234 g_assert (!cmethod->klass->valuetype);
9236 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9242 if (!cmethod || mono_loader_get_last_error ()) {
9243 if (mono_loader_get_last_error ()) {
9244 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9245 mono_error_set_from_loader_error (&cfg->error);
9251 if (!dont_verify && !cfg->skip_visibility) {
9252 MonoMethod *target_method = cil_method;
9253 if (method->is_inflated) {
9254 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9257 if (!mono_method_can_access_method (method_definition, target_method) &&
9258 !mono_method_can_access_method (method, cil_method))
9259 METHOD_ACCESS_FAILURE (method, cil_method);
9262 if (mono_security_core_clr_enabled ())
9263 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9265 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9266 /* MS.NET seems to silently convert this to a callvirt */
9271 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9272 * converts to a callvirt.
9274 * tests/bug-515884.il is an example of this behavior
9276 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9277 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9278 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9282 if (!cmethod->klass->inited)
9283 if (!mono_class_init (cmethod->klass))
9284 TYPE_LOAD_ERROR (cmethod->klass);
9286 fsig = mono_method_signature (cmethod);
9289 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9290 mini_class_is_system_array (cmethod->klass)) {
9291 array_rank = cmethod->klass->rank;
9292 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9293 direct_icall = TRUE;
9294 } else if (fsig->pinvoke) {
9295 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9296 fsig = mono_method_signature (wrapper);
9297 } else if (constrained_class) {
9299 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9303 if (cfg->llvm_only && !cfg->method->wrapper_type)
9304 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9306 /* See code below */
9307 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9308 MonoBasicBlock *tbb;
9310 GET_BBLOCK (cfg, tbb, ip + 5);
9311 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9313 * We want to extend the try block to cover the call, but we can't do it if the
9314 * call is made directly since it's followed by an exception check.
9316 direct_icall = FALSE;
9320 mono_save_token_info (cfg, image, token, cil_method);
9322 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9323 need_seq_point = TRUE;
9325 /* Don't support calls made using type arguments for now */
9327 if (cfg->gsharedvt) {
9328 if (mini_is_gsharedvt_signature (fsig))
9329 GSHAREDVT_FAILURE (*ip);
9333 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9334 g_assert_not_reached ();
9336 n = fsig->param_count + fsig->hasthis;
9338 if (!cfg->gshared && cmethod->klass->generic_container)
9342 g_assert (!mono_method_check_context_used (cmethod));
9346 //g_assert (!virtual_ || fsig->hasthis);
9351 * We have the `constrained.' prefix opcode.
9353 if (constrained_class) {
9354 if (mini_is_gsharedvt_klass (constrained_class)) {
9355 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9356 /* The 'Own method' case below */
9357 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9358 /* 'The type parameter is instantiated as a reference type' case below. */
9360 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9361 CHECK_CFG_EXCEPTION;
9367 if (constrained_partial_call) {
9368 gboolean need_box = TRUE;
9371 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9372 * called method is not known at compile time either. The called method could end up being
9373 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9374 * to box the receiver.
9375 * A simple solution would be to box always and make a normal virtual call, but that would
9376 * be bad performance wise.
9378 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9380 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9385 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9386 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9387 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9388 ins->klass = constrained_class;
9389 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9390 CHECK_CFG_EXCEPTION;
9391 } else if (need_box) {
9393 MonoBasicBlock *is_ref_bb, *end_bb;
9394 MonoInst *nonbox_call;
9397 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9399 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9400 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9402 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9404 NEW_BBLOCK (cfg, is_ref_bb);
9405 NEW_BBLOCK (cfg, end_bb);
9407 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9409 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9412 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9414 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9417 MONO_START_BB (cfg, is_ref_bb);
9418 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9419 ins->klass = constrained_class;
9420 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9421 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9425 MONO_START_BB (cfg, end_bb);
9428 nonbox_call->dreg = ins->dreg;
9431 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9432 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9433 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9436 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9438 * The type parameter is instantiated as a valuetype,
9439 * but that type doesn't override the method we're
9440 * calling, so we need to box `this'.
9442 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9443 ins->klass = constrained_class;
9444 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9445 CHECK_CFG_EXCEPTION;
9446 } else if (!constrained_class->valuetype) {
9447 int dreg = alloc_ireg_ref (cfg);
9450 * The type parameter is instantiated as a reference
9451 * type. We have a managed pointer on the stack, so
9452 * we need to dereference it here.
9454 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9455 ins->type = STACK_OBJ;
9458 if (cmethod->klass->valuetype) {
9461 /* Interface method */
9464 mono_class_setup_vtable (constrained_class);
9465 CHECK_TYPELOAD (constrained_class);
9466 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9468 TYPE_LOAD_ERROR (constrained_class);
9469 slot = mono_method_get_vtable_slot (cmethod);
9471 TYPE_LOAD_ERROR (cmethod->klass);
9472 cmethod = constrained_class->vtable [ioffset + slot];
9474 if (cmethod->klass == mono_defaults.enum_class) {
9475 /* Enum implements some interfaces, so treat this as the first case */
9476 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9477 ins->klass = constrained_class;
9478 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9479 CHECK_CFG_EXCEPTION;
9484 constrained_class = NULL;
9487 if (check_call_signature (cfg, fsig, sp))
9490 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9491 delegate_invoke = TRUE;
9493 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9494 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9495 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9503 * If the callee is a shared method, then its static cctor
9504 * might not get called after the call was patched.
9506 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9507 emit_class_init (cfg, cmethod->klass);
9508 CHECK_TYPELOAD (cmethod->klass);
9511 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9514 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9516 context_used = mini_method_check_context_used (cfg, cmethod);
9518 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9519 /* Generic method interface
9520 calls are resolved via a
9521 helper function and don't
9523 if (!cmethod_context || !cmethod_context->method_inst)
9524 pass_imt_from_rgctx = TRUE;
9528 * If a shared method calls another
9529 * shared method then the caller must
9530 * have a generic sharing context
9531 * because the magic trampoline
9532 * requires it. FIXME: We shouldn't
9533 * have to force the vtable/mrgctx
9534 * variable here. Instead there
9535 * should be a flag in the cfg to
9536 * request a generic sharing context.
9539 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9540 mono_get_vtable_var (cfg);
9545 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9547 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9549 CHECK_TYPELOAD (cmethod->klass);
9550 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9555 g_assert (!vtable_arg);
9557 if (!cfg->compile_aot) {
9559 * emit_get_rgctx_method () calls mono_class_vtable () so check
9560 * for type load errors before.
9562 mono_class_setup_vtable (cmethod->klass);
9563 CHECK_TYPELOAD (cmethod->klass);
9566 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9568 /* !marshalbyref is needed to properly handle generic methods + remoting */
9569 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9570 MONO_METHOD_IS_FINAL (cmethod)) &&
9571 !mono_class_is_marshalbyref (cmethod->klass)) {
9578 if (pass_imt_from_rgctx) {
9579 g_assert (!pass_vtable);
9581 imt_arg = emit_get_rgctx_method (cfg, context_used,
9582 cmethod, MONO_RGCTX_INFO_METHOD);
9586 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9588 /* Calling virtual generic methods */
9589 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9590 !(MONO_METHOD_IS_FINAL (cmethod) &&
9591 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9592 fsig->generic_param_count &&
9593 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9595 MonoInst *this_temp, *this_arg_temp, *store;
9596 MonoInst *iargs [4];
9598 g_assert (fsig->is_inflated);
9600 /* Prevent inlining of methods that contain indirect calls */
9601 INLINE_FAILURE ("virtual generic call");
9603 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9604 GSHAREDVT_FAILURE (*ip);
9606 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9607 g_assert (!imt_arg);
9609 g_assert (cmethod->is_inflated);
9610 imt_arg = emit_get_rgctx_method (cfg, context_used,
9611 cmethod, MONO_RGCTX_INFO_METHOD);
9612 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9614 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9615 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9616 MONO_ADD_INS (cfg->cbb, store);
9618 /* FIXME: This should be a managed pointer */
9619 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9621 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9622 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9623 cmethod, MONO_RGCTX_INFO_METHOD);
9624 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9625 addr = mono_emit_jit_icall (cfg,
9626 mono_helper_compile_generic_method, iargs);
9628 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9630 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9637 * Implement a workaround for the inherent races involved in locking:
9643 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9644 * try block, the Exit () won't be executed, see:
9645 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9646 * To work around this, we extend such try blocks to include the last x bytes
9647 * of the Monitor.Enter () call.
9649 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9650 MonoBasicBlock *tbb;
9652 GET_BBLOCK (cfg, tbb, ip + 5);
9654 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9655 * from Monitor.Enter like ArgumentNullException.
9657 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9658 /* Mark this bblock as needing to be extended */
9659 tbb->extend_try_block = TRUE;
9663 /* Conversion to a JIT intrinsic */
9664 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9665 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9666 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9673 if ((cfg->opt & MONO_OPT_INLINE) &&
9674 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9675 mono_method_check_inlining (cfg, cmethod)) {
9677 gboolean always = FALSE;
9679 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9680 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9681 /* Prevent inlining of methods that call wrappers */
9682 INLINE_FAILURE ("wrapper call");
9683 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9687 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9689 cfg->real_offset += 5;
9691 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9692 /* *sp is already set by inline_method */
9697 inline_costs += costs;
9703 /* Tail recursion elimination */
9704 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9705 gboolean has_vtargs = FALSE;
9708 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9709 INLINE_FAILURE ("tail call");
9711 /* keep it simple */
9712 for (i = fsig->param_count - 1; i >= 0; i--) {
9713 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9718 for (i = 0; i < n; ++i)
9719 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9720 MONO_INST_NEW (cfg, ins, OP_BR);
9721 MONO_ADD_INS (cfg->cbb, ins);
9722 tblock = start_bblock->out_bb [0];
9723 link_bblock (cfg, cfg->cbb, tblock);
9724 ins->inst_target_bb = tblock;
9725 start_new_bblock = 1;
9727 /* skip the CEE_RET, too */
9728 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9735 inline_costs += 10 * num_calls++;
9738 * Making generic calls out of gsharedvt methods.
9739 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9740 * patching gshared method addresses into a gsharedvt method.
9742 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9743 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9744 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9745 MonoRgctxInfoType info_type;
9748 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9749 //GSHAREDVT_FAILURE (*ip);
9750 // disable for possible remoting calls
9751 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9752 GSHAREDVT_FAILURE (*ip);
9753 if (fsig->generic_param_count) {
9754 /* virtual generic call */
9755 g_assert (!imt_arg);
9756 /* Same as the virtual generic case above */
9757 imt_arg = emit_get_rgctx_method (cfg, context_used,
9758 cmethod, MONO_RGCTX_INFO_METHOD);
9759 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9761 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9762 /* This can happen when we call a fully instantiated iface method */
9763 imt_arg = emit_get_rgctx_method (cfg, context_used,
9764 cmethod, MONO_RGCTX_INFO_METHOD);
9769 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9770 keep_this_alive = sp [0];
9772 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9773 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9775 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9776 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9778 if (cfg->llvm_only) {
9779 // FIXME: Avoid initializing vtable_arg
9780 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9782 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9787 /* Generic sharing */
9790 * Use this if the callee is gsharedvt sharable too, since
9791 * at runtime we might find an instantiation so the call cannot
9792 * be patched (the 'no_patch' code path in mini-trampolines.c).
9794 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9795 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9796 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9797 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9798 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9799 INLINE_FAILURE ("gshared");
9801 g_assert (cfg->gshared && cmethod);
9805 * We are compiling a call to a
9806 * generic method from shared code,
9807 * which means that we have to look up
9808 * the method in the rgctx and do an
9812 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9814 if (cfg->llvm_only) {
9815 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9816 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9818 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9819 // FIXME: Avoid initializing imt_arg/vtable_arg
9820 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9822 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9823 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9828 /* Direct calls to icalls */
9830 MonoMethod *wrapper;
9833 /* Inline the wrapper */
9834 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9836 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9837 g_assert (costs > 0);
9838 cfg->real_offset += 5;
9840 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9841 /* *sp is already set by inline_method */
9846 inline_costs += costs;
9855 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9856 MonoInst *val = sp [fsig->param_count];
9858 if (val->type == STACK_OBJ) {
9859 MonoInst *iargs [2];
9864 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9867 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9868 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9869 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9870 emit_write_barrier (cfg, addr, val);
9871 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9872 GSHAREDVT_FAILURE (*ip);
9873 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9874 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9876 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9877 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9878 if (!cmethod->klass->element_class->valuetype && !readonly)
9879 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9880 CHECK_TYPELOAD (cmethod->klass);
9883 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9886 g_assert_not_reached ();
9893 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9897 /* Tail prefix / tail call optimization */
9899 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9900 /* FIXME: runtime generic context pointer for jumps? */
9901 /* FIXME: handle this for generic sharing eventually */
9902 if ((ins_flag & MONO_INST_TAILCALL) &&
9903 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9904 supported_tail_call = TRUE;
9906 if (supported_tail_call) {
9909 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9910 INLINE_FAILURE ("tail call");
9912 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9914 if (cfg->backend->have_op_tail_call) {
9915 /* Handle tail calls similarly to normal calls */
9918 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9920 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9921 call->tail_call = TRUE;
9922 call->method = cmethod;
9923 call->signature = mono_method_signature (cmethod);
9926 * We implement tail calls by storing the actual arguments into the
9927 * argument variables, then emitting a CEE_JMP.
9929 for (i = 0; i < n; ++i) {
9930 /* Prevent argument from being register allocated */
9931 arg_array [i]->flags |= MONO_INST_VOLATILE;
9932 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9934 ins = (MonoInst*)call;
9935 ins->inst_p0 = cmethod;
9936 ins->inst_p1 = arg_array [0];
9937 MONO_ADD_INS (cfg->cbb, ins);
9938 link_bblock (cfg, cfg->cbb, end_bblock);
9939 start_new_bblock = 1;
9941 // FIXME: Eliminate unreachable epilogs
9944 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9945 * only reachable from this call.
9947 GET_BBLOCK (cfg, tblock, ip + 5);
9948 if (tblock == cfg->cbb || tblock->in_count == 0)
9957 * Synchronized wrappers.
9958 * It's hard to determine where to replace a method with its synchronized
9959 * wrapper without causing an infinite recursion. The current solution is
9960 * to add the synchronized wrapper in the trampolines, and to
9961 * change the called method to a dummy wrapper, and resolve that wrapper
9962 * to the real method in mono_jit_compile_method ().
9964 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9965 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9966 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9967 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9971 * Virtual calls in llvm-only mode.
9973 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9974 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9979 INLINE_FAILURE ("call");
9980 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9981 imt_arg, vtable_arg);
9983 if (tail_call && !cfg->llvm_only) {
9984 link_bblock (cfg, cfg->cbb, end_bblock);
9985 start_new_bblock = 1;
9987 // FIXME: Eliminate unreachable epilogs
9990 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9991 * only reachable from this call.
9993 GET_BBLOCK (cfg, tblock, ip + 5);
9994 if (tblock == cfg->cbb || tblock->in_count == 0)
10001 /* End of call, INS should contain the result of the call, if any */
10003 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10006 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10011 if (keep_this_alive) {
10012 MonoInst *dummy_use;
10014 /* See mono_emit_method_call_full () */
10015 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10018 CHECK_CFG_EXCEPTION;
10022 g_assert (*ip == CEE_RET);
10026 constrained_class = NULL;
10027 if (need_seq_point)
10028 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10032 if (cfg->method != method) {
10033 /* return from inlined method */
10035 * If in_count == 0, that means the ret is unreachable due to
10036 * being preceded by a throw. In that case, inline_method () will
10037 * handle setting the return value
10038 * (test case: test_0_inline_throw ()).
10040 if (return_var && cfg->cbb->in_count) {
10041 MonoType *ret_type = mono_method_signature (method)->ret;
10047 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10050 //g_assert (returnvar != -1);
10051 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10052 cfg->ret_var_set = TRUE;
10055 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10057 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10058 emit_pop_lmf (cfg);
10061 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10063 if (seq_points && !sym_seq_points) {
10065 * Place a seq point here too even though the IL stack is not
10066 * empty, so a step over on
10069 * will work correctly.
10071 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10072 MONO_ADD_INS (cfg->cbb, ins);
10075 g_assert (!return_var);
10079 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10082 emit_setret (cfg, *sp);
10085 if (sp != stack_start)
10087 MONO_INST_NEW (cfg, ins, OP_BR);
10089 ins->inst_target_bb = end_bblock;
10090 MONO_ADD_INS (cfg->cbb, ins);
10091 link_bblock (cfg, cfg->cbb, end_bblock);
10092 start_new_bblock = 1;
10096 MONO_INST_NEW (cfg, ins, OP_BR);
10098 target = ip + 1 + (signed char)(*ip);
10100 GET_BBLOCK (cfg, tblock, target);
10101 link_bblock (cfg, cfg->cbb, tblock);
10102 ins->inst_target_bb = tblock;
10103 if (sp != stack_start) {
10104 handle_stack_args (cfg, stack_start, sp - stack_start);
10106 CHECK_UNVERIFIABLE (cfg);
10108 MONO_ADD_INS (cfg->cbb, ins);
10109 start_new_bblock = 1;
10110 inline_costs += BRANCH_COST;
10124 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10126 target = ip + 1 + *(signed char*)ip;
10129 ADD_BINCOND (NULL);
10132 inline_costs += BRANCH_COST;
10136 MONO_INST_NEW (cfg, ins, OP_BR);
10139 target = ip + 4 + (gint32)read32(ip);
10141 GET_BBLOCK (cfg, tblock, target);
10142 link_bblock (cfg, cfg->cbb, tblock);
10143 ins->inst_target_bb = tblock;
10144 if (sp != stack_start) {
10145 handle_stack_args (cfg, stack_start, sp - stack_start);
10147 CHECK_UNVERIFIABLE (cfg);
10150 MONO_ADD_INS (cfg->cbb, ins);
10152 start_new_bblock = 1;
10153 inline_costs += BRANCH_COST;
10155 case CEE_BRFALSE_S:
10160 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10161 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10162 guint32 opsize = is_short ? 1 : 4;
10164 CHECK_OPSIZE (opsize);
10166 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10169 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10174 GET_BBLOCK (cfg, tblock, target);
10175 link_bblock (cfg, cfg->cbb, tblock);
10176 GET_BBLOCK (cfg, tblock, ip);
10177 link_bblock (cfg, cfg->cbb, tblock);
10179 if (sp != stack_start) {
10180 handle_stack_args (cfg, stack_start, sp - stack_start);
10181 CHECK_UNVERIFIABLE (cfg);
10184 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10185 cmp->sreg1 = sp [0]->dreg;
10186 type_from_op (cfg, cmp, sp [0], NULL);
10189 #if SIZEOF_REGISTER == 4
10190 if (cmp->opcode == OP_LCOMPARE_IMM) {
10191 /* Convert it to OP_LCOMPARE */
10192 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10193 ins->type = STACK_I8;
10194 ins->dreg = alloc_dreg (cfg, STACK_I8);
10196 MONO_ADD_INS (cfg->cbb, ins);
10197 cmp->opcode = OP_LCOMPARE;
10198 cmp->sreg2 = ins->dreg;
10201 MONO_ADD_INS (cfg->cbb, cmp);
10203 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10204 type_from_op (cfg, ins, sp [0], NULL);
10205 MONO_ADD_INS (cfg->cbb, ins);
10206 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10207 GET_BBLOCK (cfg, tblock, target);
10208 ins->inst_true_bb = tblock;
10209 GET_BBLOCK (cfg, tblock, ip);
10210 ins->inst_false_bb = tblock;
10211 start_new_bblock = 2;
10214 inline_costs += BRANCH_COST;
10229 MONO_INST_NEW (cfg, ins, *ip);
10231 target = ip + 4 + (gint32)read32(ip);
10234 ADD_BINCOND (NULL);
10237 inline_costs += BRANCH_COST;
10241 MonoBasicBlock **targets;
10242 MonoBasicBlock *default_bblock;
10243 MonoJumpInfoBBTable *table;
10244 int offset_reg = alloc_preg (cfg);
10245 int target_reg = alloc_preg (cfg);
10246 int table_reg = alloc_preg (cfg);
10247 int sum_reg = alloc_preg (cfg);
10248 gboolean use_op_switch;
10252 n = read32 (ip + 1);
10255 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10259 CHECK_OPSIZE (n * sizeof (guint32));
10260 target = ip + n * sizeof (guint32);
10262 GET_BBLOCK (cfg, default_bblock, target);
10263 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10265 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10266 for (i = 0; i < n; ++i) {
10267 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10268 targets [i] = tblock;
10269 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10273 if (sp != stack_start) {
10275 * Link the current bb with the targets as well, so handle_stack_args
10276 * will set their in_stack correctly.
10278 link_bblock (cfg, cfg->cbb, default_bblock);
10279 for (i = 0; i < n; ++i)
10280 link_bblock (cfg, cfg->cbb, targets [i]);
10282 handle_stack_args (cfg, stack_start, sp - stack_start);
10284 CHECK_UNVERIFIABLE (cfg);
10286 /* Undo the links */
10287 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10288 for (i = 0; i < n; ++i)
10289 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10292 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10293 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10295 for (i = 0; i < n; ++i)
10296 link_bblock (cfg, cfg->cbb, targets [i]);
10298 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10299 table->table = targets;
10300 table->table_size = n;
10302 use_op_switch = FALSE;
10304 /* ARM implements SWITCH statements differently */
10305 /* FIXME: Make it use the generic implementation */
10306 if (!cfg->compile_aot)
10307 use_op_switch = TRUE;
10310 if (COMPILE_LLVM (cfg))
10311 use_op_switch = TRUE;
10313 cfg->cbb->has_jump_table = 1;
10315 if (use_op_switch) {
10316 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10317 ins->sreg1 = src1->dreg;
10318 ins->inst_p0 = table;
10319 ins->inst_many_bb = targets;
10320 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10321 MONO_ADD_INS (cfg->cbb, ins);
10323 if (sizeof (gpointer) == 8)
10324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10328 #if SIZEOF_REGISTER == 8
10329 /* The upper word might not be zero, and we add it to a 64 bit address later */
10330 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10333 if (cfg->compile_aot) {
10334 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10336 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10337 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10338 ins->inst_p0 = table;
10339 ins->dreg = table_reg;
10340 MONO_ADD_INS (cfg->cbb, ins);
10343 /* FIXME: Use load_memindex */
10344 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10346 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10348 start_new_bblock = 1;
10349 inline_costs += (BRANCH_COST * 2);
10362 case CEE_LDIND_REF:
10369 dreg = alloc_freg (cfg);
10372 dreg = alloc_lreg (cfg);
10374 case CEE_LDIND_REF:
10375 dreg = alloc_ireg_ref (cfg);
10378 dreg = alloc_preg (cfg);
10381 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10382 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10383 if (*ip == CEE_LDIND_R4)
10384 ins->type = cfg->r4_stack_type;
10385 ins->flags |= ins_flag;
10386 MONO_ADD_INS (cfg->cbb, ins);
10388 if (ins_flag & MONO_INST_VOLATILE) {
10389 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10390 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10395 case CEE_STIND_REF:
10406 if (ins_flag & MONO_INST_VOLATILE) {
10407 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10408 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10411 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10412 ins->flags |= ins_flag;
10415 MONO_ADD_INS (cfg->cbb, ins);
10417 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10418 emit_write_barrier (cfg, sp [0], sp [1]);
10427 MONO_INST_NEW (cfg, ins, (*ip));
10429 ins->sreg1 = sp [0]->dreg;
10430 ins->sreg2 = sp [1]->dreg;
10431 type_from_op (cfg, ins, sp [0], sp [1]);
10433 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10435 /* Use the immediate opcodes if possible */
10436 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10437 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10438 if (imm_opcode != -1) {
10439 ins->opcode = imm_opcode;
10440 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10443 NULLIFY_INS (sp [1]);
10447 MONO_ADD_INS ((cfg)->cbb, (ins));
10449 *sp++ = mono_decompose_opcode (cfg, ins);
10466 MONO_INST_NEW (cfg, ins, (*ip));
10468 ins->sreg1 = sp [0]->dreg;
10469 ins->sreg2 = sp [1]->dreg;
10470 type_from_op (cfg, ins, sp [0], sp [1]);
10472 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10473 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10475 /* FIXME: Pass opcode to is_inst_imm */
10477 /* Use the immediate opcodes if possible */
10478 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10479 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10480 if (imm_opcode != -1) {
10481 ins->opcode = imm_opcode;
10482 if (sp [1]->opcode == OP_I8CONST) {
10483 #if SIZEOF_REGISTER == 8
10484 ins->inst_imm = sp [1]->inst_l;
10486 ins->inst_ls_word = sp [1]->inst_ls_word;
10487 ins->inst_ms_word = sp [1]->inst_ms_word;
10491 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10494 /* Might be followed by an instruction added by add_widen_op */
10495 if (sp [1]->next == NULL)
10496 NULLIFY_INS (sp [1]);
10499 MONO_ADD_INS ((cfg)->cbb, (ins));
10501 *sp++ = mono_decompose_opcode (cfg, ins);
10514 case CEE_CONV_OVF_I8:
10515 case CEE_CONV_OVF_U8:
10516 case CEE_CONV_R_UN:
10519 /* Special case this earlier so we have long constants in the IR */
10520 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10521 int data = sp [-1]->inst_c0;
10522 sp [-1]->opcode = OP_I8CONST;
10523 sp [-1]->type = STACK_I8;
10524 #if SIZEOF_REGISTER == 8
10525 if ((*ip) == CEE_CONV_U8)
10526 sp [-1]->inst_c0 = (guint32)data;
10528 sp [-1]->inst_c0 = data;
10530 sp [-1]->inst_ls_word = data;
10531 if ((*ip) == CEE_CONV_U8)
10532 sp [-1]->inst_ms_word = 0;
10534 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10536 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10543 case CEE_CONV_OVF_I4:
10544 case CEE_CONV_OVF_I1:
10545 case CEE_CONV_OVF_I2:
10546 case CEE_CONV_OVF_I:
10547 case CEE_CONV_OVF_U:
10550 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10551 ADD_UNOP (CEE_CONV_OVF_I8);
10558 case CEE_CONV_OVF_U1:
10559 case CEE_CONV_OVF_U2:
10560 case CEE_CONV_OVF_U4:
10563 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10564 ADD_UNOP (CEE_CONV_OVF_U8);
10571 case CEE_CONV_OVF_I1_UN:
10572 case CEE_CONV_OVF_I2_UN:
10573 case CEE_CONV_OVF_I4_UN:
10574 case CEE_CONV_OVF_I8_UN:
10575 case CEE_CONV_OVF_U1_UN:
10576 case CEE_CONV_OVF_U2_UN:
10577 case CEE_CONV_OVF_U4_UN:
10578 case CEE_CONV_OVF_U8_UN:
10579 case CEE_CONV_OVF_I_UN:
10580 case CEE_CONV_OVF_U_UN:
10587 CHECK_CFG_EXCEPTION;
10591 case CEE_ADD_OVF_UN:
10593 case CEE_MUL_OVF_UN:
10595 case CEE_SUB_OVF_UN:
10601 GSHAREDVT_FAILURE (*ip);
10604 token = read32 (ip + 1);
10605 klass = mini_get_class (method, token, generic_context);
10606 CHECK_TYPELOAD (klass);
10608 if (generic_class_is_reference_type (cfg, klass)) {
10609 MonoInst *store, *load;
10610 int dreg = alloc_ireg_ref (cfg);
10612 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10613 load->flags |= ins_flag;
10614 MONO_ADD_INS (cfg->cbb, load);
10616 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10617 store->flags |= ins_flag;
10618 MONO_ADD_INS (cfg->cbb, store);
10620 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10621 emit_write_barrier (cfg, sp [0], sp [1]);
10623 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10629 int loc_index = -1;
10635 token = read32 (ip + 1);
10636 klass = mini_get_class (method, token, generic_context);
10637 CHECK_TYPELOAD (klass);
10639 /* Optimize the common ldobj+stloc combination */
10642 loc_index = ip [6];
10649 loc_index = ip [5] - CEE_STLOC_0;
10656 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10657 CHECK_LOCAL (loc_index);
10659 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10660 ins->dreg = cfg->locals [loc_index]->dreg;
10661 ins->flags |= ins_flag;
10664 if (ins_flag & MONO_INST_VOLATILE) {
10665 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10666 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10672 /* Optimize the ldobj+stobj combination */
10673 /* The reference case ends up being a load+store anyway */
10674 /* Skip this if the operation is volatile. */
10675 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10680 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10688 ins->flags |= ins_flag;
10691 if (ins_flag & MONO_INST_VOLATILE) {
10692 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10693 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10702 CHECK_STACK_OVF (1);
10704 n = read32 (ip + 1);
10706 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10707 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10708 ins->type = STACK_OBJ;
10711 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10712 MonoInst *iargs [1];
10713 char *str = (char *)mono_method_get_wrapper_data (method, n);
10715 if (cfg->compile_aot)
10716 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10718 EMIT_NEW_PCONST (cfg, iargs [0], str);
10719 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10721 if (cfg->opt & MONO_OPT_SHARED) {
10722 MonoInst *iargs [3];
10724 if (cfg->compile_aot) {
10725 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10727 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10728 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10729 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10730 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10731 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10733 if (cfg->cbb->out_of_line) {
10734 MonoInst *iargs [2];
10736 if (image == mono_defaults.corlib) {
10738 * Avoid relocations in AOT and save some space by using a
10739 * version of helper_ldstr specialized to mscorlib.
10741 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10742 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10744 /* Avoid creating the string object */
10745 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10746 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10747 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10751 if (cfg->compile_aot) {
10752 NEW_LDSTRCONST (cfg, ins, image, n);
10754 MONO_ADD_INS (cfg->cbb, ins);
10757 NEW_PCONST (cfg, ins, NULL);
10758 ins->type = STACK_OBJ;
10759 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10761 OUT_OF_MEMORY_FAILURE;
10764 MONO_ADD_INS (cfg->cbb, ins);
10773 MonoInst *iargs [2];
10774 MonoMethodSignature *fsig;
10777 MonoInst *vtable_arg = NULL;
10780 token = read32 (ip + 1);
10781 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10784 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10787 mono_save_token_info (cfg, image, token, cmethod);
10789 if (!mono_class_init (cmethod->klass))
10790 TYPE_LOAD_ERROR (cmethod->klass);
10792 context_used = mini_method_check_context_used (cfg, cmethod);
10794 if (mono_security_core_clr_enabled ())
10795 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10797 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10798 emit_class_init (cfg, cmethod->klass);
10799 CHECK_TYPELOAD (cmethod->klass);
10803 if (cfg->gsharedvt) {
10804 if (mini_is_gsharedvt_variable_signature (sig))
10805 GSHAREDVT_FAILURE (*ip);
10809 n = fsig->param_count;
10813 * Generate smaller code for the common newobj <exception> instruction in
10814 * argument checking code.
10816 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10817 is_exception_class (cmethod->klass) && n <= 2 &&
10818 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10819 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10820 MonoInst *iargs [3];
10824 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10827 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10830 iargs [1] = sp [0];
10831 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10834 iargs [1] = sp [0];
10835 iargs [2] = sp [1];
10836 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10839 g_assert_not_reached ();
10847 /* move the args to allow room for 'this' in the first position */
10853 /* check_call_signature () requires sp[0] to be set */
10854 this_ins.type = STACK_OBJ;
10855 sp [0] = &this_ins;
10856 if (check_call_signature (cfg, fsig, sp))
10861 if (mini_class_is_system_array (cmethod->klass)) {
10862 *sp = emit_get_rgctx_method (cfg, context_used,
10863 cmethod, MONO_RGCTX_INFO_METHOD);
10865 /* Avoid varargs in the common case */
10866 if (fsig->param_count == 1)
10867 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10868 else if (fsig->param_count == 2)
10869 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10870 else if (fsig->param_count == 3)
10871 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10872 else if (fsig->param_count == 4)
10873 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10875 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10876 } else if (cmethod->string_ctor) {
10877 g_assert (!context_used);
10878 g_assert (!vtable_arg);
10879 /* we simply pass a null pointer */
10880 EMIT_NEW_PCONST (cfg, *sp, NULL);
10881 /* now call the string ctor */
10882 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10884 if (cmethod->klass->valuetype) {
10885 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10886 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10887 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10892 * The code generated by mini_emit_virtual_call () expects
10893 * iargs [0] to be a boxed instance, but luckily the vcall
10894 * will be transformed into a normal call there.
10896 } else if (context_used) {
10897 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10900 MonoVTable *vtable = NULL;
10902 if (!cfg->compile_aot)
10903 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10904 CHECK_TYPELOAD (cmethod->klass);
10907 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10908 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10909 * As a workaround, we call class cctors before allocating objects.
10911 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10912 emit_class_init (cfg, cmethod->klass);
10913 if (cfg->verbose_level > 2)
10914 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10915 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10918 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10921 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10924 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10926 /* Now call the actual ctor */
10927 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10928 CHECK_CFG_EXCEPTION;
10931 if (alloc == NULL) {
10933 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10934 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10942 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10943 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10946 case CEE_CASTCLASS:
10950 token = read32 (ip + 1);
10951 klass = mini_get_class (method, token, generic_context);
10952 CHECK_TYPELOAD (klass);
10953 if (sp [0]->type != STACK_OBJ)
10956 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10957 CHECK_CFG_EXCEPTION;
10966 token = read32 (ip + 1);
10967 klass = mini_get_class (method, token, generic_context);
10968 CHECK_TYPELOAD (klass);
10969 if (sp [0]->type != STACK_OBJ)
10972 context_used = mini_class_check_context_used (cfg, klass);
10974 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10975 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10976 MonoInst *args [3];
10983 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10986 idx = get_castclass_cache_idx (cfg);
10987 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10989 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10992 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10993 MonoMethod *mono_isinst;
10994 MonoInst *iargs [1];
10997 mono_isinst = mono_marshal_get_isinst (klass);
10998 iargs [0] = sp [0];
11000 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
11001 iargs, ip, cfg->real_offset, TRUE);
11002 CHECK_CFG_EXCEPTION;
11003 g_assert (costs > 0);
11006 cfg->real_offset += 5;
11010 inline_costs += costs;
11013 ins = handle_isinst (cfg, klass, *sp, context_used);
11014 CHECK_CFG_EXCEPTION;
11020 case CEE_UNBOX_ANY: {
11021 MonoInst *res, *addr;
11026 token = read32 (ip + 1);
11027 klass = mini_get_class (method, token, generic_context);
11028 CHECK_TYPELOAD (klass);
11030 mono_save_token_info (cfg, image, token, klass);
11032 context_used = mini_class_check_context_used (cfg, klass);
11034 if (mini_is_gsharedvt_klass (klass)) {
11035 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11037 } else if (generic_class_is_reference_type (cfg, klass)) {
11038 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11039 CHECK_CFG_EXCEPTION;
11040 } else if (mono_class_is_nullable (klass)) {
11041 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11043 addr = handle_unbox (cfg, klass, sp, context_used);
11045 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11056 MonoClass *enum_class;
11057 MonoMethod *has_flag;
11063 token = read32 (ip + 1);
11064 klass = mini_get_class (method, token, generic_context);
11065 CHECK_TYPELOAD (klass);
11067 mono_save_token_info (cfg, image, token, klass);
11069 context_used = mini_class_check_context_used (cfg, klass);
11071 if (generic_class_is_reference_type (cfg, klass)) {
11077 if (klass == mono_defaults.void_class)
11079 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11081 /* frequent check in generic code: box (struct), brtrue */
11086 * <push int/long ptr>
11089 * constrained. MyFlags
11090 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11092 * If we find this sequence and the operand types on box and constrained
11093 * are equal, we can emit a specialized instruction sequence instead of
11094 * the very slow HasFlag () call.
11096 if ((cfg->opt & MONO_OPT_INTRINS) &&
11097 /* Cheap checks first. */
11098 ip + 5 + 6 + 5 < end &&
11099 ip [5] == CEE_PREFIX1 &&
11100 ip [6] == CEE_CONSTRAINED_ &&
11101 ip [11] == CEE_CALLVIRT &&
11102 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11103 mono_class_is_enum (klass) &&
11104 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11105 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11106 has_flag->klass == mono_defaults.enum_class &&
11107 !strcmp (has_flag->name, "HasFlag") &&
11108 has_flag->signature->hasthis &&
11109 has_flag->signature->param_count == 1) {
11110 CHECK_TYPELOAD (enum_class);
11112 if (enum_class == klass) {
11113 MonoInst *enum_this, *enum_flag;
11118 enum_this = sp [0];
11119 enum_flag = sp [1];
11121 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11126 // FIXME: LLVM can't handle the inconsistent bb linking
11127 if (!mono_class_is_nullable (klass) &&
11128 !mini_is_gsharedvt_klass (klass) &&
11129 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11130 (ip [5] == CEE_BRTRUE ||
11131 ip [5] == CEE_BRTRUE_S ||
11132 ip [5] == CEE_BRFALSE ||
11133 ip [5] == CEE_BRFALSE_S)) {
11134 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11136 MonoBasicBlock *true_bb, *false_bb;
11140 if (cfg->verbose_level > 3) {
11141 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11142 printf ("<box+brtrue opt>\n");
11147 case CEE_BRFALSE_S:
11150 target = ip + 1 + (signed char)(*ip);
11157 target = ip + 4 + (gint)(read32 (ip));
11161 g_assert_not_reached ();
11165 * We need to link both bblocks, since it is needed for handling stack
11166 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11167 * Branching to only one of them would lead to inconsistencies, so
11168 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11170 GET_BBLOCK (cfg, true_bb, target);
11171 GET_BBLOCK (cfg, false_bb, ip);
11173 mono_link_bblock (cfg, cfg->cbb, true_bb);
11174 mono_link_bblock (cfg, cfg->cbb, false_bb);
11176 if (sp != stack_start) {
11177 handle_stack_args (cfg, stack_start, sp - stack_start);
11179 CHECK_UNVERIFIABLE (cfg);
11182 if (COMPILE_LLVM (cfg)) {
11183 dreg = alloc_ireg (cfg);
11184 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11187 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11189 /* The JIT can't eliminate the iconst+compare */
11190 MONO_INST_NEW (cfg, ins, OP_BR);
11191 ins->inst_target_bb = is_true ? true_bb : false_bb;
11192 MONO_ADD_INS (cfg->cbb, ins);
11195 start_new_bblock = 1;
11199 *sp++ = handle_box (cfg, val, klass, context_used);
11201 CHECK_CFG_EXCEPTION;
11210 token = read32 (ip + 1);
11211 klass = mini_get_class (method, token, generic_context);
11212 CHECK_TYPELOAD (klass);
11214 mono_save_token_info (cfg, image, token, klass);
11216 context_used = mini_class_check_context_used (cfg, klass);
11218 if (mono_class_is_nullable (klass)) {
11221 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11222 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11226 ins = handle_unbox (cfg, klass, sp, context_used);
11239 MonoClassField *field;
11240 #ifndef DISABLE_REMOTING
11244 gboolean is_instance;
11246 gpointer addr = NULL;
11247 gboolean is_special_static;
11249 MonoInst *store_val = NULL;
11250 MonoInst *thread_ins;
11253 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11255 if (op == CEE_STFLD) {
11258 store_val = sp [1];
11263 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11265 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11268 if (op == CEE_STSFLD) {
11271 store_val = sp [0];
11276 token = read32 (ip + 1);
11277 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11278 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11279 klass = field->parent;
11282 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11285 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11286 FIELD_ACCESS_FAILURE (method, field);
11287 mono_class_init (klass);
11289 /* if the class is Critical then transparent code cannot access its fields */
11290 if (!is_instance && mono_security_core_clr_enabled ())
11291 ensure_method_is_allowed_to_access_field (cfg, method, field);
11293 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11294 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11295 if (mono_security_core_clr_enabled ())
11296 ensure_method_is_allowed_to_access_field (cfg, method, field);
11299 ftype = mono_field_get_type (field);
11302 * LDFLD etc. is usable on static fields as well, so convert those cases to
11305 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11317 g_assert_not_reached ();
11319 is_instance = FALSE;
11322 context_used = mini_class_check_context_used (cfg, klass);
11324 /* INSTANCE CASE */
11326 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11327 if (op == CEE_STFLD) {
11328 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11330 #ifndef DISABLE_REMOTING
11331 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11332 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11333 MonoInst *iargs [5];
11335 GSHAREDVT_FAILURE (op);
11337 iargs [0] = sp [0];
11338 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11339 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11340 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11342 iargs [4] = sp [1];
11344 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11345 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11346 iargs, ip, cfg->real_offset, TRUE);
11347 CHECK_CFG_EXCEPTION;
11348 g_assert (costs > 0);
11350 cfg->real_offset += 5;
11352 inline_costs += costs;
11354 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11361 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11363 if (mini_is_gsharedvt_klass (klass)) {
11364 MonoInst *offset_ins;
11366 context_used = mini_class_check_context_used (cfg, klass);
11368 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11369 /* The value is offset by 1 */
11370 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11371 dreg = alloc_ireg_mp (cfg);
11372 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11373 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11374 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11376 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11378 if (sp [0]->opcode != OP_LDADDR)
11379 store->flags |= MONO_INST_FAULT;
11381 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11382 /* insert call to write barrier */
11386 dreg = alloc_ireg_mp (cfg);
11387 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11388 emit_write_barrier (cfg, ptr, sp [1]);
11391 store->flags |= ins_flag;
11398 #ifndef DISABLE_REMOTING
11399 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11400 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11401 MonoInst *iargs [4];
11403 GSHAREDVT_FAILURE (op);
11405 iargs [0] = sp [0];
11406 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11407 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11408 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11409 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11410 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11411 iargs, ip, cfg->real_offset, TRUE);
11412 CHECK_CFG_EXCEPTION;
11413 g_assert (costs > 0);
11415 cfg->real_offset += 5;
11419 inline_costs += costs;
11421 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11427 if (sp [0]->type == STACK_VTYPE) {
11430 /* Have to compute the address of the variable */
11432 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11434 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11436 g_assert (var->klass == klass);
11438 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11442 if (op == CEE_LDFLDA) {
11443 if (sp [0]->type == STACK_OBJ) {
11444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11445 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11448 dreg = alloc_ireg_mp (cfg);
11450 if (mini_is_gsharedvt_klass (klass)) {
11451 MonoInst *offset_ins;
11453 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11454 /* The value is offset by 1 */
11455 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11456 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11458 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11460 ins->klass = mono_class_from_mono_type (field->type);
11461 ins->type = STACK_MP;
11466 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11468 if (mini_is_gsharedvt_klass (klass)) {
11469 MonoInst *offset_ins;
11471 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11472 /* The value is offset by 1 */
11473 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11474 dreg = alloc_ireg_mp (cfg);
11475 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11476 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11478 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11480 load->flags |= ins_flag;
11481 if (sp [0]->opcode != OP_LDADDR)
11482 load->flags |= MONO_INST_FAULT;
11494 context_used = mini_class_check_context_used (cfg, klass);
11496 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11499 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11500 * to be called here.
11502 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11503 mono_class_vtable (cfg->domain, klass);
11504 CHECK_TYPELOAD (klass);
11506 mono_domain_lock (cfg->domain);
11507 if (cfg->domain->special_static_fields)
11508 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11509 mono_domain_unlock (cfg->domain);
11511 is_special_static = mono_class_field_is_special_static (field);
11513 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11514 thread_ins = mono_get_thread_intrinsic (cfg);
11518 /* Generate IR to compute the field address */
11519 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11521 * Fast access to TLS data
11522 * Inline version of get_thread_static_data () in
11526 int idx, static_data_reg, array_reg, dreg;
11528 GSHAREDVT_FAILURE (op);
11530 MONO_ADD_INS (cfg->cbb, thread_ins);
11531 static_data_reg = alloc_ireg (cfg);
11532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11534 if (cfg->compile_aot) {
11535 int offset_reg, offset2_reg, idx_reg;
11537 /* For TLS variables, this will return the TLS offset */
11538 EMIT_NEW_SFLDACONST (cfg, ins, field);
11539 offset_reg = ins->dreg;
11540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11541 idx_reg = alloc_ireg (cfg);
11542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11544 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11545 array_reg = alloc_ireg (cfg);
11546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11547 offset2_reg = alloc_ireg (cfg);
11548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11550 dreg = alloc_ireg (cfg);
11551 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11553 offset = (gsize)addr & 0x7fffffff;
11554 idx = offset & 0x3f;
11556 array_reg = alloc_ireg (cfg);
11557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11558 dreg = alloc_ireg (cfg);
11559 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11561 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11562 (cfg->compile_aot && is_special_static) ||
11563 (context_used && is_special_static)) {
11564 MonoInst *iargs [2];
11566 g_assert (field->parent);
11567 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11568 if (context_used) {
11569 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11570 field, MONO_RGCTX_INFO_CLASS_FIELD);
11572 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11574 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11575 } else if (context_used) {
11576 MonoInst *static_data;
11579 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11580 method->klass->name_space, method->klass->name, method->name,
11581 depth, field->offset);
11584 if (mono_class_needs_cctor_run (klass, method))
11585 emit_class_init (cfg, klass);
11588 * The pointer we're computing here is
11590 * super_info.static_data + field->offset
11592 static_data = emit_get_rgctx_klass (cfg, context_used,
11593 klass, MONO_RGCTX_INFO_STATIC_DATA);
11595 if (mini_is_gsharedvt_klass (klass)) {
11596 MonoInst *offset_ins;
11598 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11599 /* The value is offset by 1 */
11600 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11601 dreg = alloc_ireg_mp (cfg);
11602 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11603 } else if (field->offset == 0) {
11606 int addr_reg = mono_alloc_preg (cfg);
11607 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11609 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11610 MonoInst *iargs [2];
11612 g_assert (field->parent);
11613 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11614 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11615 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11617 MonoVTable *vtable = NULL;
11619 if (!cfg->compile_aot)
11620 vtable = mono_class_vtable (cfg->domain, klass);
11621 CHECK_TYPELOAD (klass);
11624 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11625 if (!(g_slist_find (class_inits, klass))) {
11626 emit_class_init (cfg, klass);
11627 if (cfg->verbose_level > 2)
11628 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11629 class_inits = g_slist_prepend (class_inits, klass);
11632 if (cfg->run_cctors) {
11633 /* This makes it so that inlining cannot trigger */
11634 /* .cctors: too many apps depend on them */
11635 /* running with a specific order... */
11637 if (! vtable->initialized)
11638 INLINE_FAILURE ("class init");
11639 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11640 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11641 g_assert_not_reached ();
11642 goto exception_exit;
11646 if (cfg->compile_aot)
11647 EMIT_NEW_SFLDACONST (cfg, ins, field);
11650 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11652 EMIT_NEW_PCONST (cfg, ins, addr);
11655 MonoInst *iargs [1];
11656 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11657 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11661 /* Generate IR to do the actual load/store operation */
11663 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11664 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11665 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11668 if (op == CEE_LDSFLDA) {
11669 ins->klass = mono_class_from_mono_type (ftype);
11670 ins->type = STACK_PTR;
11672 } else if (op == CEE_STSFLD) {
11675 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11676 store->flags |= ins_flag;
11678 gboolean is_const = FALSE;
11679 MonoVTable *vtable = NULL;
11680 gpointer addr = NULL;
11682 if (!context_used) {
11683 vtable = mono_class_vtable (cfg->domain, klass);
11684 CHECK_TYPELOAD (klass);
11686 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11687 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11688 int ro_type = ftype->type;
11690 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11691 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11692 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11695 GSHAREDVT_FAILURE (op);
11697 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11700 case MONO_TYPE_BOOLEAN:
11702 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11706 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11709 case MONO_TYPE_CHAR:
11711 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11715 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11720 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11724 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11729 case MONO_TYPE_PTR:
11730 case MONO_TYPE_FNPTR:
11731 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11732 type_to_eval_stack_type ((cfg), field->type, *sp);
11735 case MONO_TYPE_STRING:
11736 case MONO_TYPE_OBJECT:
11737 case MONO_TYPE_CLASS:
11738 case MONO_TYPE_SZARRAY:
11739 case MONO_TYPE_ARRAY:
11740 if (!mono_gc_is_moving ()) {
11741 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11742 type_to_eval_stack_type ((cfg), field->type, *sp);
11750 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11755 case MONO_TYPE_VALUETYPE:
11765 CHECK_STACK_OVF (1);
11767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11768 load->flags |= ins_flag;
11774 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11775 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11776 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11787 token = read32 (ip + 1);
11788 klass = mini_get_class (method, token, generic_context);
11789 CHECK_TYPELOAD (klass);
11790 if (ins_flag & MONO_INST_VOLATILE) {
11791 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11792 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11794 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11795 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11796 ins->flags |= ins_flag;
11797 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11798 generic_class_is_reference_type (cfg, klass)) {
11799 /* insert call to write barrier */
11800 emit_write_barrier (cfg, sp [0], sp [1]);
11812 const char *data_ptr;
11814 guint32 field_token;
11820 token = read32 (ip + 1);
11822 klass = mini_get_class (method, token, generic_context);
11823 CHECK_TYPELOAD (klass);
11825 context_used = mini_class_check_context_used (cfg, klass);
11827 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11828 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11829 ins->sreg1 = sp [0]->dreg;
11830 ins->type = STACK_I4;
11831 ins->dreg = alloc_ireg (cfg);
11832 MONO_ADD_INS (cfg->cbb, ins);
11833 *sp = mono_decompose_opcode (cfg, ins);
11836 if (context_used) {
11837 MonoInst *args [3];
11838 MonoClass *array_class = mono_array_class_get (klass, 1);
11839 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11841 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11844 args [0] = emit_get_rgctx_klass (cfg, context_used,
11845 array_class, MONO_RGCTX_INFO_VTABLE);
11850 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11852 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11854 if (cfg->opt & MONO_OPT_SHARED) {
11855 /* Decompose now to avoid problems with references to the domainvar */
11856 MonoInst *iargs [3];
11858 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11859 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11860 iargs [2] = sp [0];
11862 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11864 /* Decompose later since it is needed by abcrem */
11865 MonoClass *array_type = mono_array_class_get (klass, 1);
11866 mono_class_vtable (cfg->domain, array_type);
11867 CHECK_TYPELOAD (array_type);
11869 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11870 ins->dreg = alloc_ireg_ref (cfg);
11871 ins->sreg1 = sp [0]->dreg;
11872 ins->inst_newa_class = klass;
11873 ins->type = STACK_OBJ;
11874 ins->klass = array_type;
11875 MONO_ADD_INS (cfg->cbb, ins);
11876 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11877 cfg->cbb->has_array_access = TRUE;
11879 /* Needed so mono_emit_load_get_addr () gets called */
11880 mono_get_got_var (cfg);
11890 * we inline/optimize the initialization sequence if possible.
11891 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11892 * for small sizes open code the memcpy
11893 * ensure the rva field is big enough
11895 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11896 MonoMethod *memcpy_method = get_memcpy_method ();
11897 MonoInst *iargs [3];
11898 int add_reg = alloc_ireg_mp (cfg);
11900 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11901 if (cfg->compile_aot) {
11902 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11904 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11906 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11907 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11916 if (sp [0]->type != STACK_OBJ)
11919 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11920 ins->dreg = alloc_preg (cfg);
11921 ins->sreg1 = sp [0]->dreg;
11922 ins->type = STACK_I4;
11923 /* This flag will be inherited by the decomposition */
11924 ins->flags |= MONO_INST_FAULT;
11925 MONO_ADD_INS (cfg->cbb, ins);
11926 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11927 cfg->cbb->has_array_access = TRUE;
11935 if (sp [0]->type != STACK_OBJ)
11938 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11940 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11941 CHECK_TYPELOAD (klass);
11942 /* we need to make sure that this array is exactly the type it needs
11943 * to be for correctness. the wrappers are lax with their usage
11944 * so we need to ignore them here
11946 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11947 MonoClass *array_class = mono_array_class_get (klass, 1);
11948 mini_emit_check_array_type (cfg, sp [0], array_class);
11949 CHECK_TYPELOAD (array_class);
11953 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11958 case CEE_LDELEM_I1:
11959 case CEE_LDELEM_U1:
11960 case CEE_LDELEM_I2:
11961 case CEE_LDELEM_U2:
11962 case CEE_LDELEM_I4:
11963 case CEE_LDELEM_U4:
11964 case CEE_LDELEM_I8:
11966 case CEE_LDELEM_R4:
11967 case CEE_LDELEM_R8:
11968 case CEE_LDELEM_REF: {
11974 if (*ip == CEE_LDELEM) {
11976 token = read32 (ip + 1);
11977 klass = mini_get_class (method, token, generic_context);
11978 CHECK_TYPELOAD (klass);
11979 mono_class_init (klass);
11982 klass = array_access_to_klass (*ip);
11984 if (sp [0]->type != STACK_OBJ)
11987 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11989 if (mini_is_gsharedvt_variable_klass (klass)) {
11990 // FIXME-VT: OP_ICONST optimization
11991 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11992 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11993 ins->opcode = OP_LOADV_MEMBASE;
11994 } else if (sp [1]->opcode == OP_ICONST) {
11995 int array_reg = sp [0]->dreg;
11996 int index_reg = sp [1]->dreg;
11997 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11999 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12000 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12002 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12003 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12005 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12006 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12009 if (*ip == CEE_LDELEM)
12016 case CEE_STELEM_I1:
12017 case CEE_STELEM_I2:
12018 case CEE_STELEM_I4:
12019 case CEE_STELEM_I8:
12020 case CEE_STELEM_R4:
12021 case CEE_STELEM_R8:
12022 case CEE_STELEM_REF:
12027 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12029 if (*ip == CEE_STELEM) {
12031 token = read32 (ip + 1);
12032 klass = mini_get_class (method, token, generic_context);
12033 CHECK_TYPELOAD (klass);
12034 mono_class_init (klass);
12037 klass = array_access_to_klass (*ip);
12039 if (sp [0]->type != STACK_OBJ)
12042 emit_array_store (cfg, klass, sp, TRUE);
12044 if (*ip == CEE_STELEM)
12051 case CEE_CKFINITE: {
12055 if (cfg->llvm_only) {
12056 MonoInst *iargs [1];
12058 iargs [0] = sp [0];
12059 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12061 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12062 ins->sreg1 = sp [0]->dreg;
12063 ins->dreg = alloc_freg (cfg);
12064 ins->type = STACK_R8;
12065 MONO_ADD_INS (cfg->cbb, ins);
12067 *sp++ = mono_decompose_opcode (cfg, ins);
12073 case CEE_REFANYVAL: {
12074 MonoInst *src_var, *src;
12076 int klass_reg = alloc_preg (cfg);
12077 int dreg = alloc_preg (cfg);
12079 GSHAREDVT_FAILURE (*ip);
12082 MONO_INST_NEW (cfg, ins, *ip);
12085 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12086 CHECK_TYPELOAD (klass);
12088 context_used = mini_class_check_context_used (cfg, klass);
12091 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12093 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12094 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12097 if (context_used) {
12098 MonoInst *klass_ins;
12100 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12101 klass, MONO_RGCTX_INFO_KLASS);
12104 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12105 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12107 mini_emit_class_check (cfg, klass_reg, klass);
12109 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12110 ins->type = STACK_MP;
12111 ins->klass = klass;
12116 case CEE_MKREFANY: {
12117 MonoInst *loc, *addr;
12119 GSHAREDVT_FAILURE (*ip);
12122 MONO_INST_NEW (cfg, ins, *ip);
12125 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12126 CHECK_TYPELOAD (klass);
12128 context_used = mini_class_check_context_used (cfg, klass);
12130 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12131 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12133 if (context_used) {
12134 MonoInst *const_ins;
12135 int type_reg = alloc_preg (cfg);
12137 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12138 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12141 } else if (cfg->compile_aot) {
12142 int const_reg = alloc_preg (cfg);
12143 int type_reg = alloc_preg (cfg);
12145 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12146 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12148 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12150 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12151 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12153 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12155 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12156 ins->type = STACK_VTYPE;
12157 ins->klass = mono_defaults.typed_reference_class;
12162 case CEE_LDTOKEN: {
12164 MonoClass *handle_class;
12166 CHECK_STACK_OVF (1);
12169 n = read32 (ip + 1);
12171 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12172 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12173 handle = mono_method_get_wrapper_data (method, n);
12174 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12175 if (handle_class == mono_defaults.typehandle_class)
12176 handle = &((MonoClass*)handle)->byval_arg;
12179 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12184 mono_class_init (handle_class);
12185 if (cfg->gshared) {
12186 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12187 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12188 /* This case handles ldtoken
12189 of an open type, like for
12192 } else if (handle_class == mono_defaults.typehandle_class) {
12193 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12194 } else if (handle_class == mono_defaults.fieldhandle_class)
12195 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12196 else if (handle_class == mono_defaults.methodhandle_class)
12197 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12199 g_assert_not_reached ();
12202 if ((cfg->opt & MONO_OPT_SHARED) &&
12203 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12204 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12205 MonoInst *addr, *vtvar, *iargs [3];
12206 int method_context_used;
12208 method_context_used = mini_method_check_context_used (cfg, method);
12210 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12212 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12213 EMIT_NEW_ICONST (cfg, iargs [1], n);
12214 if (method_context_used) {
12215 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12216 method, MONO_RGCTX_INFO_METHOD);
12217 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12219 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12220 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12222 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12224 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12226 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12228 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12229 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12230 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12231 (cmethod->klass == mono_defaults.systemtype_class) &&
12232 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12233 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12235 mono_class_init (tclass);
12236 if (context_used) {
12237 ins = emit_get_rgctx_klass (cfg, context_used,
12238 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12239 } else if (cfg->compile_aot) {
12240 if (method->wrapper_type) {
12241 mono_error_init (&error); // have to do it since there are multiple conditionals below
12242 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12243 /* Special case for static synchronized wrappers */
12244 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12246 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12247 /* FIXME: n is not a normal token */
12249 EMIT_NEW_PCONST (cfg, ins, NULL);
12252 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12255 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12257 EMIT_NEW_PCONST (cfg, ins, rt);
12259 ins->type = STACK_OBJ;
12260 ins->klass = cmethod->klass;
12263 MonoInst *addr, *vtvar;
12265 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12267 if (context_used) {
12268 if (handle_class == mono_defaults.typehandle_class) {
12269 ins = emit_get_rgctx_klass (cfg, context_used,
12270 mono_class_from_mono_type ((MonoType *)handle),
12271 MONO_RGCTX_INFO_TYPE);
12272 } else if (handle_class == mono_defaults.methodhandle_class) {
12273 ins = emit_get_rgctx_method (cfg, context_used,
12274 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12275 } else if (handle_class == mono_defaults.fieldhandle_class) {
12276 ins = emit_get_rgctx_field (cfg, context_used,
12277 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12279 g_assert_not_reached ();
12281 } else if (cfg->compile_aot) {
12282 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12284 EMIT_NEW_PCONST (cfg, ins, handle);
12286 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12288 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12298 MONO_INST_NEW (cfg, ins, OP_THROW);
12300 ins->sreg1 = sp [0]->dreg;
12302 cfg->cbb->out_of_line = TRUE;
12303 MONO_ADD_INS (cfg->cbb, ins);
12304 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12305 MONO_ADD_INS (cfg->cbb, ins);
12308 link_bblock (cfg, cfg->cbb, end_bblock);
12309 start_new_bblock = 1;
12310 /* This can complicate code generation for llvm since the return value might not be defined */
12311 if (COMPILE_LLVM (cfg))
12312 INLINE_FAILURE ("throw");
12314 case CEE_ENDFINALLY:
12315 /* mono_save_seq_point_info () depends on this */
12316 if (sp != stack_start)
12317 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12318 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12319 MONO_ADD_INS (cfg->cbb, ins);
12321 start_new_bblock = 1;
12324 * Control will leave the method so empty the stack, otherwise
12325 * the next basic block will start with a nonempty stack.
12327 while (sp != stack_start) {
12332 case CEE_LEAVE_S: {
12335 if (*ip == CEE_LEAVE) {
12337 target = ip + 5 + (gint32)read32(ip + 1);
12340 target = ip + 2 + (signed char)(ip [1]);
12343 /* empty the stack */
12344 while (sp != stack_start) {
12349 * If this leave statement is in a catch block, check for a
12350 * pending exception, and rethrow it if necessary.
12351 * We avoid doing this in runtime invoke wrappers, since those are called
12352 * by native code which expects the wrapper to catch all exceptions.
12354 for (i = 0; i < header->num_clauses; ++i) {
12355 MonoExceptionClause *clause = &header->clauses [i];
12358 * Use <= in the final comparison to handle clauses with multiple
12359 * leave statements, like in bug #78024.
12360 * The ordering of the exception clauses guarantees that we find the
12361 * innermost clause.
12363 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12365 MonoBasicBlock *dont_throw;
12370 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12373 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12375 NEW_BBLOCK (cfg, dont_throw);
12378 * Currently, we always rethrow the abort exception, despite the
12379 * fact that this is not correct. See thread6.cs for an example.
12380 * But propagating the abort exception is more important than
12381 * getting the semantics right.
12383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12385 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12387 MONO_START_BB (cfg, dont_throw);
12392 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12395 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12397 MonoExceptionClause *clause;
12399 for (tmp = handlers; tmp; tmp = tmp->next) {
12400 clause = (MonoExceptionClause *)tmp->data;
12401 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12403 link_bblock (cfg, cfg->cbb, tblock);
12404 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12405 ins->inst_target_bb = tblock;
12406 ins->inst_eh_block = clause;
12407 MONO_ADD_INS (cfg->cbb, ins);
12408 cfg->cbb->has_call_handler = 1;
12409 if (COMPILE_LLVM (cfg)) {
12410 MonoBasicBlock *target_bb;
12413 * Link the finally bblock with the target, since it will
12414 * conceptually branch there.
12416 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12417 GET_BBLOCK (cfg, target_bb, target);
12418 link_bblock (cfg, tblock, target_bb);
12421 g_list_free (handlers);
12424 MONO_INST_NEW (cfg, ins, OP_BR);
12425 MONO_ADD_INS (cfg->cbb, ins);
12426 GET_BBLOCK (cfg, tblock, target);
12427 link_bblock (cfg, cfg->cbb, tblock);
12428 ins->inst_target_bb = tblock;
12430 start_new_bblock = 1;
12432 if (*ip == CEE_LEAVE)
12441 * Mono specific opcodes
12443 case MONO_CUSTOM_PREFIX: {
12445 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12449 case CEE_MONO_ICALL: {
12451 MonoJitICallInfo *info;
12453 token = read32 (ip + 2);
12454 func = mono_method_get_wrapper_data (method, token);
12455 info = mono_find_jit_icall_by_addr (func);
12457 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12460 CHECK_STACK (info->sig->param_count);
12461 sp -= info->sig->param_count;
12463 ins = mono_emit_jit_icall (cfg, info->func, sp);
12464 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12468 inline_costs += 10 * num_calls++;
12472 case CEE_MONO_LDPTR_CARD_TABLE:
12473 case CEE_MONO_LDPTR_NURSERY_START:
12474 case CEE_MONO_LDPTR_NURSERY_BITS:
12475 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12476 CHECK_STACK_OVF (1);
12479 case CEE_MONO_LDPTR_CARD_TABLE:
12480 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12482 case CEE_MONO_LDPTR_NURSERY_START:
12483 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12485 case CEE_MONO_LDPTR_NURSERY_BITS:
12486 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12488 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12489 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12495 inline_costs += 10 * num_calls++;
12498 case CEE_MONO_LDPTR: {
12501 CHECK_STACK_OVF (1);
12503 token = read32 (ip + 2);
12505 ptr = mono_method_get_wrapper_data (method, token);
12506 EMIT_NEW_PCONST (cfg, ins, ptr);
12509 inline_costs += 10 * num_calls++;
12510 /* Can't embed random pointers into AOT code */
12514 case CEE_MONO_JIT_ICALL_ADDR: {
12515 MonoJitICallInfo *callinfo;
12518 CHECK_STACK_OVF (1);
12520 token = read32 (ip + 2);
12522 ptr = mono_method_get_wrapper_data (method, token);
12523 callinfo = mono_find_jit_icall_by_addr (ptr);
12524 g_assert (callinfo);
12525 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12528 inline_costs += 10 * num_calls++;
12531 case CEE_MONO_ICALL_ADDR: {
12532 MonoMethod *cmethod;
12535 CHECK_STACK_OVF (1);
12537 token = read32 (ip + 2);
12539 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12541 if (cfg->compile_aot) {
12542 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12544 ptr = mono_lookup_internal_call (cmethod);
12546 EMIT_NEW_PCONST (cfg, ins, ptr);
12552 case CEE_MONO_VTADDR: {
12553 MonoInst *src_var, *src;
12559 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12560 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12565 case CEE_MONO_NEWOBJ: {
12566 MonoInst *iargs [2];
12568 CHECK_STACK_OVF (1);
12570 token = read32 (ip + 2);
12571 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12572 mono_class_init (klass);
12573 NEW_DOMAINCONST (cfg, iargs [0]);
12574 MONO_ADD_INS (cfg->cbb, iargs [0]);
12575 NEW_CLASSCONST (cfg, iargs [1], klass);
12576 MONO_ADD_INS (cfg->cbb, iargs [1]);
12577 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12579 inline_costs += 10 * num_calls++;
12582 case CEE_MONO_OBJADDR:
12585 MONO_INST_NEW (cfg, ins, OP_MOVE);
12586 ins->dreg = alloc_ireg_mp (cfg);
12587 ins->sreg1 = sp [0]->dreg;
12588 ins->type = STACK_MP;
12589 MONO_ADD_INS (cfg->cbb, ins);
12593 case CEE_MONO_LDNATIVEOBJ:
12595 * Similar to LDOBJ, but instead load the unmanaged
12596 * representation of the vtype to the stack.
12601 token = read32 (ip + 2);
12602 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12603 g_assert (klass->valuetype);
12604 mono_class_init (klass);
12607 MonoInst *src, *dest, *temp;
12610 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12611 temp->backend.is_pinvoke = 1;
12612 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12613 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12615 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12616 dest->type = STACK_VTYPE;
12617 dest->klass = klass;
12623 case CEE_MONO_RETOBJ: {
12625 * Same as RET, but return the native representation of a vtype
12628 g_assert (cfg->ret);
12629 g_assert (mono_method_signature (method)->pinvoke);
12634 token = read32 (ip + 2);
12635 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12637 if (!cfg->vret_addr) {
12638 g_assert (cfg->ret_var_is_local);
12640 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12642 EMIT_NEW_RETLOADA (cfg, ins);
12644 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12646 if (sp != stack_start)
12649 MONO_INST_NEW (cfg, ins, OP_BR);
12650 ins->inst_target_bb = end_bblock;
12651 MONO_ADD_INS (cfg->cbb, ins);
12652 link_bblock (cfg, cfg->cbb, end_bblock);
12653 start_new_bblock = 1;
12657 case CEE_MONO_CISINST:
12658 case CEE_MONO_CCASTCLASS: {
12663 token = read32 (ip + 2);
12664 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12665 if (ip [1] == CEE_MONO_CISINST)
12666 ins = handle_cisinst (cfg, klass, sp [0]);
12668 ins = handle_ccastclass (cfg, klass, sp [0]);
12673 case CEE_MONO_SAVE_LMF:
12674 case CEE_MONO_RESTORE_LMF:
12677 case CEE_MONO_CLASSCONST:
12678 CHECK_STACK_OVF (1);
12680 token = read32 (ip + 2);
12681 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12684 inline_costs += 10 * num_calls++;
12686 case CEE_MONO_NOT_TAKEN:
12687 cfg->cbb->out_of_line = TRUE;
12690 case CEE_MONO_TLS: {
12693 CHECK_STACK_OVF (1);
12695 key = (MonoTlsKey)read32 (ip + 2);
12696 g_assert (key < TLS_KEY_NUM);
12698 ins = mono_create_tls_get (cfg, key);
12700 if (cfg->compile_aot) {
12702 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12703 ins->dreg = alloc_preg (cfg);
12704 ins->type = STACK_PTR;
12706 g_assert_not_reached ();
12709 ins->type = STACK_PTR;
12710 MONO_ADD_INS (cfg->cbb, ins);
12715 case CEE_MONO_DYN_CALL: {
12716 MonoCallInst *call;
12718 /* It would be easier to call a trampoline, but that would put an
12719 * extra frame on the stack, confusing exception handling. So
12720 * implement it inline using an opcode for now.
12723 if (!cfg->dyn_call_var) {
12724 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12725 /* prevent it from being register allocated */
12726 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12729 /* Has to use a call inst since the local regalloc expects it */
12730 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12731 ins = (MonoInst*)call;
12733 ins->sreg1 = sp [0]->dreg;
12734 ins->sreg2 = sp [1]->dreg;
12735 MONO_ADD_INS (cfg->cbb, ins);
12737 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12740 inline_costs += 10 * num_calls++;
12744 case CEE_MONO_MEMORY_BARRIER: {
12746 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12750 case CEE_MONO_JIT_ATTACH: {
12751 MonoInst *args [16], *domain_ins;
12752 MonoInst *ad_ins, *jit_tls_ins;
12753 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12755 cfg->attach_cookie = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12756 cfg->attach_dummy = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12758 if (mono_threads_is_coop_enabled ()) {
12759 /* AOT code is only used in the root domain */
12760 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12761 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12762 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12763 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12765 EMIT_NEW_PCONST (cfg, ins, NULL);
12766 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12768 ad_ins = mono_get_domain_intrinsic (cfg);
12769 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12771 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12772 NEW_BBLOCK (cfg, next_bb);
12773 NEW_BBLOCK (cfg, call_bb);
12775 if (cfg->compile_aot) {
12776 /* AOT code is only used in the root domain */
12777 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12779 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12781 MONO_ADD_INS (cfg->cbb, ad_ins);
12782 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12783 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12785 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12790 MONO_START_BB (cfg, call_bb);
12793 /* AOT code is only used in the root domain */
12794 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12795 EMIT_NEW_PCONST (cfg, args [1], NULL);
12796 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12797 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12800 MONO_START_BB (cfg, next_bb);
12806 case CEE_MONO_JIT_DETACH: {
12807 MonoInst *args [16];
12809 /* Restore the original domain */
12810 dreg = alloc_ireg (cfg);
12811 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->attach_cookie->dreg);
12812 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12813 mono_emit_jit_icall (cfg, mono_jit_thread_detach, args);
12817 case CEE_MONO_CALLI_EXTRA_ARG: {
12819 MonoMethodSignature *fsig;
12823 * This is the same as CEE_CALLI, but passes an additional argument
12824 * to the called method in llvmonly mode.
12825 * This is only used by delegate invoke wrappers to call the
12826 * actual delegate method.
12828 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12831 token = read32 (ip + 2);
12839 fsig = mini_get_signature (method, token, generic_context);
12841 if (cfg->llvm_only)
12842 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12844 n = fsig->param_count + fsig->hasthis + 1;
12851 if (cfg->llvm_only) {
12853 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12854 * cconv. This is set by mono_init_delegate ().
12856 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12857 MonoInst *callee = addr;
12858 MonoInst *call, *localloc_ins;
12859 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12860 int low_bit_reg = alloc_preg (cfg);
12862 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12863 NEW_BBLOCK (cfg, end_bb);
12865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12869 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12870 addr = emit_get_rgctx_sig (cfg, context_used,
12871 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12873 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12875 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12876 ins->dreg = alloc_preg (cfg);
12877 ins->inst_imm = 2 * SIZEOF_VOID_P;
12878 MONO_ADD_INS (cfg->cbb, ins);
12879 localloc_ins = ins;
12880 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12884 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12885 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12887 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12888 MONO_START_BB (cfg, is_gsharedvt_bb);
12889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12890 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12891 ins->dreg = call->dreg;
12893 MONO_START_BB (cfg, end_bb);
12895 /* Caller uses a normal calling conv */
12897 MonoInst *callee = addr;
12898 MonoInst *call, *localloc_ins;
12899 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12900 int low_bit_reg = alloc_preg (cfg);
12902 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12903 NEW_BBLOCK (cfg, end_bb);
12905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12909 /* Normal case: callee uses a normal cconv, no conversion is needed */
12910 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12911 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12912 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12913 MONO_START_BB (cfg, is_gsharedvt_bb);
12914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12915 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12916 MONO_ADD_INS (cfg->cbb, addr);
12918 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12920 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12921 ins->dreg = alloc_preg (cfg);
12922 ins->inst_imm = 2 * SIZEOF_VOID_P;
12923 MONO_ADD_INS (cfg->cbb, ins);
12924 localloc_ins = ins;
12925 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12926 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12929 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12930 ins->dreg = call->dreg;
12931 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12933 MONO_START_BB (cfg, end_bb);
12936 /* Same as CEE_CALLI */
12937 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12939 * We pass the address to the gsharedvt trampoline in the rgctx reg
12941 MonoInst *callee = addr;
12943 addr = emit_get_rgctx_sig (cfg, context_used,
12944 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12945 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12947 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12951 if (!MONO_TYPE_IS_VOID (fsig->ret))
12952 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12954 CHECK_CFG_EXCEPTION;
12958 constrained_class = NULL;
12962 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12968 case CEE_PREFIX1: {
12971 case CEE_ARGLIST: {
12972 /* somewhat similar to LDTOKEN */
12973 MonoInst *addr, *vtvar;
12974 CHECK_STACK_OVF (1);
12975 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12977 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12978 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12980 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12981 ins->type = STACK_VTYPE;
12982 ins->klass = mono_defaults.argumenthandle_class;
12992 MonoInst *cmp, *arg1, *arg2;
13000 * The following transforms:
13001 * CEE_CEQ into OP_CEQ
13002 * CEE_CGT into OP_CGT
13003 * CEE_CGT_UN into OP_CGT_UN
13004 * CEE_CLT into OP_CLT
13005 * CEE_CLT_UN into OP_CLT_UN
13007 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13009 MONO_INST_NEW (cfg, ins, cmp->opcode);
13010 cmp->sreg1 = arg1->dreg;
13011 cmp->sreg2 = arg2->dreg;
13012 type_from_op (cfg, cmp, arg1, arg2);
13014 add_widen_op (cfg, cmp, &arg1, &arg2);
13015 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13016 cmp->opcode = OP_LCOMPARE;
13017 else if (arg1->type == STACK_R4)
13018 cmp->opcode = OP_RCOMPARE;
13019 else if (arg1->type == STACK_R8)
13020 cmp->opcode = OP_FCOMPARE;
13022 cmp->opcode = OP_ICOMPARE;
13023 MONO_ADD_INS (cfg->cbb, cmp);
13024 ins->type = STACK_I4;
13025 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13026 type_from_op (cfg, ins, arg1, arg2);
13028 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13030 * The backends expect the fceq opcodes to do the
13033 ins->sreg1 = cmp->sreg1;
13034 ins->sreg2 = cmp->sreg2;
13037 MONO_ADD_INS (cfg->cbb, ins);
13043 MonoInst *argconst;
13044 MonoMethod *cil_method;
13046 CHECK_STACK_OVF (1);
13048 n = read32 (ip + 2);
13049 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13052 mono_class_init (cmethod->klass);
13054 mono_save_token_info (cfg, image, n, cmethod);
13056 context_used = mini_method_check_context_used (cfg, cmethod);
13058 cil_method = cmethod;
13059 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13060 METHOD_ACCESS_FAILURE (method, cil_method);
13062 if (mono_security_core_clr_enabled ())
13063 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13066 * Optimize the common case of ldftn+delegate creation
13068 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13069 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13070 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13071 MonoInst *target_ins, *handle_ins;
13072 MonoMethod *invoke;
13073 int invoke_context_used;
13075 invoke = mono_get_delegate_invoke (ctor_method->klass);
13076 if (!invoke || !mono_method_signature (invoke))
13079 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13081 target_ins = sp [-1];
13083 if (mono_security_core_clr_enabled ())
13084 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13086 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13087 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13088 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13090 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13094 /* FIXME: SGEN support */
13095 if (invoke_context_used == 0 || cfg->llvm_only) {
13097 if (cfg->verbose_level > 3)
13098 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13099 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13102 CHECK_CFG_EXCEPTION;
13112 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13113 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13117 inline_costs += 10 * num_calls++;
13120 case CEE_LDVIRTFTN: {
13121 MonoInst *args [2];
13125 n = read32 (ip + 2);
13126 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13129 mono_class_init (cmethod->klass);
13131 context_used = mini_method_check_context_used (cfg, cmethod);
13133 if (mono_security_core_clr_enabled ())
13134 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13137 * Optimize the common case of ldvirtftn+delegate creation
13139 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13140 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13141 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13142 MonoInst *target_ins, *handle_ins;
13143 MonoMethod *invoke;
13144 int invoke_context_used;
13145 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13147 invoke = mono_get_delegate_invoke (ctor_method->klass);
13148 if (!invoke || !mono_method_signature (invoke))
13151 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13153 target_ins = sp [-1];
13155 if (mono_security_core_clr_enabled ())
13156 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13158 /* FIXME: SGEN support */
13159 if (invoke_context_used == 0 || cfg->llvm_only) {
13161 if (cfg->verbose_level > 3)
13162 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13163 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13166 CHECK_CFG_EXCEPTION;
13179 args [1] = emit_get_rgctx_method (cfg, context_used,
13180 cmethod, MONO_RGCTX_INFO_METHOD);
13183 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13185 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13188 inline_costs += 10 * num_calls++;
13192 CHECK_STACK_OVF (1);
13194 n = read16 (ip + 2);
13196 EMIT_NEW_ARGLOAD (cfg, ins, n);
13201 CHECK_STACK_OVF (1);
13203 n = read16 (ip + 2);
13205 NEW_ARGLOADA (cfg, ins, n);
13206 MONO_ADD_INS (cfg->cbb, ins);
13214 n = read16 (ip + 2);
13216 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13218 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13222 CHECK_STACK_OVF (1);
13224 n = read16 (ip + 2);
13226 EMIT_NEW_LOCLOAD (cfg, ins, n);
13231 unsigned char *tmp_ip;
13232 CHECK_STACK_OVF (1);
13234 n = read16 (ip + 2);
13237 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13243 EMIT_NEW_LOCLOADA (cfg, ins, n);
13252 n = read16 (ip + 2);
13254 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13256 emit_stloc_ir (cfg, sp, header, n);
13263 if (sp != stack_start)
13265 if (cfg->method != method)
13267 * Inlining this into a loop in a parent could lead to
13268 * stack overflows which is different behavior than the
13269 * non-inlined case, thus disable inlining in this case.
13271 INLINE_FAILURE("localloc");
13273 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13274 ins->dreg = alloc_preg (cfg);
13275 ins->sreg1 = sp [0]->dreg;
13276 ins->type = STACK_PTR;
13277 MONO_ADD_INS (cfg->cbb, ins);
13279 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13281 ins->flags |= MONO_INST_INIT;
13286 case CEE_ENDFILTER: {
13287 MonoExceptionClause *clause, *nearest;
13292 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13294 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13295 ins->sreg1 = (*sp)->dreg;
13296 MONO_ADD_INS (cfg->cbb, ins);
13297 start_new_bblock = 1;
13301 for (cc = 0; cc < header->num_clauses; ++cc) {
13302 clause = &header->clauses [cc];
13303 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13304 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13305 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13308 g_assert (nearest);
13309 if ((ip - header->code) != nearest->handler_offset)
13314 case CEE_UNALIGNED_:
13315 ins_flag |= MONO_INST_UNALIGNED;
13316 /* FIXME: record alignment? we can assume 1 for now */
13320 case CEE_VOLATILE_:
13321 ins_flag |= MONO_INST_VOLATILE;
13325 ins_flag |= MONO_INST_TAILCALL;
13326 cfg->flags |= MONO_CFG_HAS_TAIL;
13327 /* Can't inline tail calls at this time */
13328 inline_costs += 100000;
13335 token = read32 (ip + 2);
13336 klass = mini_get_class (method, token, generic_context);
13337 CHECK_TYPELOAD (klass);
13338 if (generic_class_is_reference_type (cfg, klass))
13339 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13341 mini_emit_initobj (cfg, *sp, NULL, klass);
13345 case CEE_CONSTRAINED_:
13347 token = read32 (ip + 2);
13348 constrained_class = mini_get_class (method, token, generic_context);
13349 CHECK_TYPELOAD (constrained_class);
13353 case CEE_INITBLK: {
13354 MonoInst *iargs [3];
13358 /* Skip optimized paths for volatile operations. */
13359 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13360 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13361 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13362 /* emit_memset only works when val == 0 */
13363 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13366 iargs [0] = sp [0];
13367 iargs [1] = sp [1];
13368 iargs [2] = sp [2];
13369 if (ip [1] == CEE_CPBLK) {
13371 * FIXME: It's unclear whether we should be emitting both the acquire
13372 * and release barriers for cpblk. It is technically both a load and
13373 * store operation, so it seems like that's the sensible thing to do.
13375 * FIXME: We emit full barriers on both sides of the operation for
13376 * simplicity. We should have a separate atomic memcpy method instead.
13378 MonoMethod *memcpy_method = get_memcpy_method ();
13380 if (ins_flag & MONO_INST_VOLATILE)
13381 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13383 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13384 call->flags |= ins_flag;
13386 if (ins_flag & MONO_INST_VOLATILE)
13387 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13389 MonoMethod *memset_method = get_memset_method ();
13390 if (ins_flag & MONO_INST_VOLATILE) {
13391 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13392 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13394 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13395 call->flags |= ins_flag;
13406 ins_flag |= MONO_INST_NOTYPECHECK;
13408 ins_flag |= MONO_INST_NORANGECHECK;
13409 /* we ignore the no-nullcheck for now since we
13410 * really do it explicitly only when doing callvirt->call
13414 case CEE_RETHROW: {
13416 int handler_offset = -1;
13418 for (i = 0; i < header->num_clauses; ++i) {
13419 MonoExceptionClause *clause = &header->clauses [i];
13420 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13421 handler_offset = clause->handler_offset;
13426 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13428 if (handler_offset == -1)
13431 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13432 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13433 ins->sreg1 = load->dreg;
13434 MONO_ADD_INS (cfg->cbb, ins);
13436 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13437 MONO_ADD_INS (cfg->cbb, ins);
13440 link_bblock (cfg, cfg->cbb, end_bblock);
13441 start_new_bblock = 1;
13449 CHECK_STACK_OVF (1);
13451 token = read32 (ip + 2);
13452 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13453 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13456 val = mono_type_size (type, &ialign);
13458 MonoClass *klass = mini_get_class (method, token, generic_context);
13459 CHECK_TYPELOAD (klass);
13461 val = mono_type_size (&klass->byval_arg, &ialign);
13463 if (mini_is_gsharedvt_klass (klass))
13464 GSHAREDVT_FAILURE (*ip);
13466 EMIT_NEW_ICONST (cfg, ins, val);
13471 case CEE_REFANYTYPE: {
13472 MonoInst *src_var, *src;
13474 GSHAREDVT_FAILURE (*ip);
13480 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13482 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13483 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13489 case CEE_READONLY_:
13502 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13512 g_warning ("opcode 0x%02x not handled", *ip);
13516 if (start_new_bblock != 1)
13519 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13520 if (cfg->cbb->next_bb) {
13521 /* This could already be set because of inlining, #693905 */
13522 MonoBasicBlock *bb = cfg->cbb;
13524 while (bb->next_bb)
13526 bb->next_bb = end_bblock;
13528 cfg->cbb->next_bb = end_bblock;
13531 if (cfg->method == method && cfg->domainvar) {
13533 MonoInst *get_domain;
13535 cfg->cbb = init_localsbb;
13537 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13538 MONO_ADD_INS (cfg->cbb, get_domain);
13540 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13542 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13543 MONO_ADD_INS (cfg->cbb, store);
13546 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13547 if (cfg->compile_aot)
13548 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13549 mono_get_got_var (cfg);
13552 if (cfg->method == method && cfg->got_var)
13553 mono_emit_load_got_addr (cfg);
13555 if (init_localsbb) {
13556 cfg->cbb = init_localsbb;
13558 for (i = 0; i < header->num_locals; ++i) {
13559 emit_init_local (cfg, i, header->locals [i], init_locals);
13563 if (cfg->init_ref_vars && cfg->method == method) {
13564 /* Emit initialization for ref vars */
13565 // FIXME: Avoid duplication initialization for IL locals.
13566 for (i = 0; i < cfg->num_varinfo; ++i) {
13567 MonoInst *ins = cfg->varinfo [i];
13569 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13570 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13574 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13575 cfg->cbb = init_localsbb;
13576 emit_push_lmf (cfg);
13579 cfg->cbb = init_localsbb;
13580 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13583 MonoBasicBlock *bb;
13586 * Make seq points at backward branch targets interruptable.
13588 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13589 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13590 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13593 /* Add a sequence point for method entry/exit events */
13594 if (seq_points && cfg->gen_sdb_seq_points) {
13595 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13596 MONO_ADD_INS (init_localsbb, ins);
13597 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13598 MONO_ADD_INS (cfg->bb_exit, ins);
13602 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13603 * the code they refer to was dead (#11880).
13605 if (sym_seq_points) {
13606 for (i = 0; i < header->code_size; ++i) {
13607 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13610 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13611 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13618 if (cfg->method == method) {
13619 MonoBasicBlock *bb;
13620 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13621 bb->region = mono_find_block_region (cfg, bb->real_offset);
13623 mono_create_spvar_for_region (cfg, bb->region);
13624 if (cfg->verbose_level > 2)
13625 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13629 if (inline_costs < 0) {
13632 /* Method is too large */
13633 mname = mono_method_full_name (method, TRUE);
13634 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13638 if ((cfg->verbose_level > 2) && (cfg->method == method))
13639 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13644 g_assert (!mono_error_ok (&cfg->error));
13648 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13652 set_exception_type_from_invalid_il (cfg, method, ip);
13656 g_slist_free (class_inits);
13657 mono_basic_block_free (original_bb);
13658 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13659 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13660 if (cfg->exception_type)
13663 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store opcode (OP_STORE*_MEMBASE_REG) to its
 * immediate-source counterpart (OP_STORE*_MEMBASE_IMM), so a store whose
 * source register is known to hold a constant can embed the constant
 * directly in the instruction.
 *   Opcodes with no immediate form abort via g_assert_not_reached ().
 * NOTE(review): the switch header/default/closing braces are elided in
 * this view; comments cover only the visible cases.
 */
13667 store_membase_reg_to_store_membase_imm (int opcode)
13670 case OP_STORE_MEMBASE_REG:
13671 return OP_STORE_MEMBASE_IMM;
13672 case OP_STOREI1_MEMBASE_REG:
13673 return OP_STOREI1_MEMBASE_IMM;
13674 case OP_STOREI2_MEMBASE_REG:
13675 return OP_STOREI2_MEMBASE_IMM;
13676 case OP_STOREI4_MEMBASE_REG:
13677 return OP_STOREI4_MEMBASE_IMM;
13678 case OP_STOREI8_MEMBASE_REG:
13679 return OP_STOREI8_MEMBASE_IMM;
/* No immediate counterpart exists for any other store opcode. */
13681 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. OP_IADD ->
 * OP_IADD_IMM) so that a constant second operand can be folded into the
 * instruction itself instead of occupying a register.
 * NOTE(review): the case labels and the default return are elided in this
 * view; callers in this file (e.g. mono_op_to_op_imm_noemul) use the
 * result directly — confirm the default against the full source.
 */
13688 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU and shift forms. */
13692 return OP_IADD_IMM;
13694 return OP_ISUB_IMM;
13696 return OP_IDIV_IMM;
13698 return OP_IDIV_UN_IMM;
13700 return OP_IREM_IMM;
13702 return OP_IREM_UN_IMM;
13704 return OP_IMUL_IMM;
13706 return OP_IAND_IMM;
13710 return OP_IXOR_IMM;
13712 return OP_ISHL_IMM;
13714 return OP_ISHR_IMM;
13716 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU and shift forms. */
13719 return OP_LADD_IMM;
13721 return OP_LSUB_IMM;
13723 return OP_LAND_IMM;
13727 return OP_LXOR_IMM;
13729 return OP_LSHL_IMM;
13731 return OP_LSHR_IMM;
13733 return OP_LSHR_UN_IMM;
/* Long remainder only has a native immediate form on 64 bit hosts. */
13734 #if SIZEOF_REGISTER == 8
13736 return OP_LREM_IMM;
/* Compare opcodes. */
13740 return OP_COMPARE_IMM;
13742 return OP_ICOMPARE_IMM;
13744 return OP_LCOMPARE_IMM;
/* Stores: fold the constant source register into the store. */
13746 case OP_STORE_MEMBASE_REG:
13747 return OP_STORE_MEMBASE_IMM;
13748 case OP_STOREI1_MEMBASE_REG:
13749 return OP_STOREI1_MEMBASE_IMM;
13750 case OP_STOREI2_MEMBASE_REG:
13751 return OP_STOREI2_MEMBASE_IMM;
13752 case OP_STOREI4_MEMBASE_REG:
13753 return OP_STOREI4_MEMBASE_IMM;
/* x86-family specific push/compare forms. */
13755 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13757 return OP_X86_PUSH_IMM;
13758 case OP_X86_COMPARE_MEMBASE_REG:
13759 return OP_X86_COMPARE_MEMBASE_IMM;
13761 #if defined(TARGET_AMD64)
13762 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13763 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a register whose target is constant becomes direct. */
13765 case OP_VOIDCALL_REG:
13766 return OP_VOIDCALL;
13774 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL indirect-load opcode (CEE_LDIND_*) into the
 * corresponding typed basereg+offset IR load (OP_LOAD*_MEMBASE).
 * Any other opcode aborts via g_assert_not_reached ().
 * NOTE(review): most case labels are elided in this view; only the
 * CEE_LDIND_REF label is visible — the surrounding returns presumably
 * belong to the other CEE_LDIND_* cases in declaration order.
 */
13781 ldind_to_load_membase (int opcode)
13785 return OP_LOADI1_MEMBASE;
13787 return OP_LOADU1_MEMBASE;
13789 return OP_LOADI2_MEMBASE;
13791 return OP_LOADU2_MEMBASE;
13793 return OP_LOADI4_MEMBASE;
13795 return OP_LOADU4_MEMBASE;
13797 return OP_LOAD_MEMBASE;
/* Object references use the pointer-sized load. */
13798 case CEE_LDIND_REF:
13799 return OP_LOAD_MEMBASE;
13801 return OP_LOADI8_MEMBASE;
13803 return OP_LOADR4_MEMBASE;
13805 return OP_LOADR8_MEMBASE;
13807 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL indirect-store opcode (CEE_STIND_*) into the
 * corresponding typed basereg+offset IR store (OP_STORE*_MEMBASE_REG).
 * Any other opcode aborts via g_assert_not_reached ().
 * NOTE(review): most case labels are elided in this view.
 */
13814 stind_to_store_membase (int opcode)
13818 return OP_STOREI1_MEMBASE_REG;
13820 return OP_STOREI2_MEMBASE_REG;
13822 return OP_STOREI4_MEMBASE_REG;
/* Object references use the pointer-sized store. */
13824 case CEE_STIND_REF:
13825 return OP_STORE_MEMBASE_REG;
13827 return OP_STOREI8_MEMBASE_REG;
13829 return OP_STORER4_MEMBASE_REG;
13831 return OP_STORER8_MEMBASE_REG;
13833 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a basereg+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * OP_LOAD*_MEM form, used when the base register is a known constant.
 * Only x86/amd64 provide these opcodes; on other targets the (elided)
 * fallthrough presumably reports "no mapping" — confirm against the full
 * source.
 */
13840 mono_load_membase_to_load_mem (int opcode)
13842 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13843 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13845 case OP_LOAD_MEMBASE:
13846 return OP_LOAD_MEM;
13847 case OP_LOADU1_MEMBASE:
13848 return OP_LOADU1_MEM;
13849 case OP_LOADU2_MEMBASE:
13850 return OP_LOADU2_MEM;
13851 case OP_LOADI4_MEMBASE:
13852 return OP_LOADI4_MEM;
13853 case OP_LOADU4_MEMBASE:
13854 return OP_LOADU4_MEM;
/* 64 bit absolute loads only exist on 64 bit hosts. */
13855 #if SIZEOF_REGISTER == 8
13856 case OP_LOADI8_MEMBASE:
13857 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   If an ALU instruction whose destination variable would be spilled with
 * STORE_OPCODE can instead be executed as a read-modify-write directly on
 * the variable's memory slot, return that _MEMBASE opcode. Callers test
 * the result against -1 (see the dreg handling in mono_spill_global_vars),
 * so the elided default evidently returns -1.
 *   x86 only allows this for pointer-sized/32 bit stores; amd64 also
 * accepts 64 bit stores and uses separate AMD64_* opcodes for the 64 bit
 * ALU forms.
 * NOTE(review): the case labels are elided in this view.
 */
13866 op_to_op_dest_membase (int store_opcode, int opcode)
13868 #if defined(TARGET_X86)
/* Only 32 bit / pointer sized slots can be operated on in place. */
13869 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source forms. */
13874 return OP_X86_ADD_MEMBASE_REG;
13876 return OP_X86_SUB_MEMBASE_REG;
13878 return OP_X86_AND_MEMBASE_REG;
13880 return OP_X86_OR_MEMBASE_REG;
13882 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source forms. */
13885 return OP_X86_ADD_MEMBASE_IMM;
13888 return OP_X86_SUB_MEMBASE_IMM;
13891 return OP_X86_AND_MEMBASE_IMM;
13894 return OP_X86_OR_MEMBASE_IMM;
13897 return OP_X86_XOR_MEMBASE_IMM;
13903 #if defined(TARGET_AMD64)
/* amd64 additionally handles 64 bit slots. */
13904 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit forms share the x86 opcodes. */
13909 return OP_X86_ADD_MEMBASE_REG;
13911 return OP_X86_SUB_MEMBASE_REG;
13913 return OP_X86_AND_MEMBASE_REG;
13915 return OP_X86_OR_MEMBASE_REG;
13917 return OP_X86_XOR_MEMBASE_REG;
13919 return OP_X86_ADD_MEMBASE_IMM;
13921 return OP_X86_SUB_MEMBASE_IMM;
13923 return OP_X86_AND_MEMBASE_IMM;
13925 return OP_X86_OR_MEMBASE_IMM;
13927 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms use dedicated AMD64_* opcodes. */
13929 return OP_AMD64_ADD_MEMBASE_REG;
13931 return OP_AMD64_SUB_MEMBASE_REG;
13933 return OP_AMD64_AND_MEMBASE_REG;
13935 return OP_AMD64_OR_MEMBASE_REG;
13937 return OP_AMD64_XOR_MEMBASE_REG;
13940 return OP_AMD64_ADD_MEMBASE_IMM;
13943 return OP_AMD64_SUB_MEMBASE_IMM;
13946 return OP_AMD64_AND_MEMBASE_IMM;
13949 return OP_AMD64_OR_MEMBASE_IMM;
13952 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse the materialization of a compare result with its store: for a
 * one byte store the (elided) condition cases map to x86 SETcc-to-memory
 * opcodes. Callers test the result against -1 (see mono_spill_global_vars),
 * so the elided default evidently returns -1.
 * NOTE(review): the case labels are elided; only the SETEQ/SETNE returns
 * are visible here.
 */
13962 op_to_op_store_membase (int store_opcode, int opcode)
13964 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13967 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13968 return OP_X86_SETEQ_MEMBASE;
13970 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13971 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   If OPCODE's first source register is produced by a variable load with
 * LOAD_OPCODE, return an opcode form which reads that operand straight
 * from memory, avoiding the intermediate register. Callers test the
 * result against -1, so the elided defaults evidently return -1.
 *   cfg->backend->ilp32 distinguishes 4 byte pointer loads (x32-style
 * configurations) from true 64 bit loads on amd64.
 * NOTE(review): several case labels/returns are elided in this view.
 */
13979 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13982 /* FIXME: This has sign extension issues */
/* Byte-sized compare against an immediate can read memory directly. */
13984 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13985 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only 32 bit / pointer sized loads qualify on x86. */
13988 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13993 return OP_X86_PUSH_MEMBASE;
13994 case OP_COMPARE_IMM:
13995 case OP_ICOMPARE_IMM:
13996 return OP_X86_COMPARE_MEMBASE_IMM;
13999 return OP_X86_COMPARE_MEMBASE_REG;
14003 #ifdef TARGET_AMD64
14004 /* FIXME: This has sign extension issues */
14006 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14007 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Push only handles full 64 bit loads (pointer loads unless ilp32). */
14012 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14013 return OP_X86_PUSH_MEMBASE;
/* The commented-out immediate-compare fusion below was disabled because
 * the instruction only accepts 32 bit immediates. */
14015 /* FIXME: This only works for 32 bit immediates
14016 case OP_COMPARE_IMM:
14017 case OP_LCOMPARE_IMM:
14018 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14019 return OP_AMD64_COMPARE_MEMBASE_IMM;
14021 case OP_ICOMPARE_IMM:
14022 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14023 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick the 32 or 64 bit form by the load width. */
14027 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14028 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14029 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14030 return OP_AMD64_COMPARE_MEMBASE_REG;
14033 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14034 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase (), but for OPCODE's second source
 * register: return a reg,mem form which reads the second operand directly
 * from the variable's slot. Callers test the result against -1, so the
 * elided defaults evidently return -1.
 * NOTE(review): the case labels are elided in this view.
 */
14043 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only 32 bit / pointer sized loads qualify. */
14046 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14052 return OP_X86_COMPARE_REG_MEMBASE;
14054 return OP_X86_ADD_REG_MEMBASE;
14056 return OP_X86_SUB_REG_MEMBASE;
14058 return OP_X86_AND_REG_MEMBASE;
14060 return OP_X86_OR_REG_MEMBASE;
14062 return OP_X86_XOR_REG_MEMBASE;
14066 #ifdef TARGET_AMD64
/* amd64: 32 bit operands (including ilp32 pointer loads)... */
14067 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14070 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14072 return OP_X86_ADD_REG_MEMBASE;
14074 return OP_X86_SUB_REG_MEMBASE;
14076 return OP_X86_AND_REG_MEMBASE;
14078 return OP_X86_OR_REG_MEMBASE;
14080 return OP_X86_XOR_REG_MEMBASE;
/* ...versus full 64 bit operands, which use the AMD64_* opcodes. */
14082 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14086 return OP_AMD64_COMPARE_REG_MEMBASE;
14088 return OP_AMD64_ADD_REG_MEMBASE;
14090 return OP_AMD64_SUB_REG_MEMBASE;
14092 return OP_AMD64_AND_REG_MEMBASE;
14094 return OP_AMD64_OR_REG_MEMBASE;
14096 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but the (elided) case labels bail out for
 * opcodes which this target emulates with runtime helper calls — long
 * shifts on 32 bit hosts and mul/div/rem where MONO_ARCH_EMULATE_* is
 * defined — since those must keep their register form so the emulation
 * path can replace them. All remaining opcodes defer to mono_op_to_op_imm.
 */
14105 mono_op_to_op_imm_noemul (int opcode)
14108 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14114 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14121 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14126 return mono_op_to_op_imm (opcode);
14131 * mono_handle_global_vregs:
14133 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (visible portions):
 *   1) Walk every instruction of every bblock and record, per vreg, the
 *      bblock it is used in; a vreg seen in two different bblocks gets a
 *      MonoInst variable created for it (made "global").
 *   2) Conversely, variables only ever used inside one bblock are demoted
 *      back to plain local vregs (flagged MONO_INST_IS_DEAD).
 *   3) Compress cfg->varinfo/cfg->vars to drop the dead entries.
 */
14137 mono_handle_global_vregs (MonoCompile *cfg)
14139 gint32 *vreg_to_bb;
14140 MonoBasicBlock *bb;
/* NOTE(review): this allocates sizeof (gint32*) — pointer size — per
 * element instead of sizeof (gint32), and the "+ 1" binds outside the
 * multiplication, so only one extra *byte* is added rather than one extra
 * element. Harmless over/under-allocation in practice on 64 bit hosts,
 * but the intent was presumably sizeof (gint32) * (cfg->next_vreg + 1). */
14143 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14145 #ifdef MONO_ARCH_SIMD_INTRINSICS
14146 if (cfg->uses_simd_intrinsics)
14147 mono_simd_simplify_indirection (cfg);
14150 /* Find local vregs used in more than one bb */
14151 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14152 MonoInst *ins = bb->code;
14153 int block_num = bb->block_num;
14155 if (cfg->verbose_level > 2)
14156 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14159 for (; ins; ins = ins->next) {
14160 const char *spec = INS_INFO (ins->opcode);
14161 int regtype = 0, regindex;
14164 if (G_UNLIKELY (cfg->verbose_level > 2))
14165 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
14167 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit the instruction's registers: index 0 is the dest, 1-3 the
 * sources, as described by the INS_INFO spec string. */
14169 for (regindex = 0; regindex < 4; regindex ++) {
14172 if (regindex == 0) {
14173 regtype = spec [MONO_INST_DEST];
14174 if (regtype == ' ')
14177 } else if (regindex == 1) {
14178 regtype = spec [MONO_INST_SRC1];
14179 if (regtype == ' ')
14182 } else if (regindex == 2) {
14183 regtype = spec [MONO_INST_SRC2];
14184 if (regtype == ' ')
14187 } else if (regindex == 3) {
14188 regtype = spec [MONO_INST_SRC3];
14189 if (regtype == ' ')
14194 #if SIZEOF_REGISTER == 4
14195 /* In the LLVM case, the long opcodes are not decomposed */
14196 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14198 * Since some instructions reference the original long vreg,
14199 * and some reference the two component vregs, it is quite hard
14200 * to determine when it needs to be global. So be conservative.
14202 if (!get_vreg_to_inst (cfg, vreg)) {
14203 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14205 if (cfg->verbose_level > 2)
14206 printf ("LONG VREG R%d made global.\n", vreg);
14210 * Make the component vregs volatile since the optimizations can
14211 * get confused otherwise.
14213 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14214 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14218 g_assert (vreg != -1);
/* vreg_to_bb encoding: 0 = not seen yet (0 is a valid block num, hence
 * the +1 bias), block_num+1 = seen in exactly that bblock,
 * -1 = seen in multiple bblocks. */
14220 prev_bb = vreg_to_bb [vreg];
14221 if (prev_bb == 0) {
14222 /* 0 is a valid block num */
14223 vreg_to_bb [vreg] = block_num + 1;
14224 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by definition; skip them. */
14225 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14228 if (!get_vreg_to_inst (cfg, vreg)) {
14229 if (G_UNLIKELY (cfg->verbose_level > 2))
14230 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the regtype ('i'/'l'/'f'/
 * vtype); reference vregs get object type so the GC maps stay exact. */
14234 if (vreg_is_ref (cfg, vreg))
14235 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14237 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14240 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14243 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14246 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14249 g_assert_not_reached ();
14253 /* Flag as having been used in more than one bb */
14254 vreg_to_bb [vreg] = -1;
14260 /* If a variable is used in only one bblock, convert it into a local vreg */
14261 for (i = 0; i < cfg->num_varinfo; i++) {
14262 MonoInst *var = cfg->varinfo [i];
14263 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14265 switch (var->type) {
14271 #if SIZEOF_REGISTER == 8
14274 #if !defined(TARGET_X86)
14275 /* Enabling this screws up the fp stack on x86 */
14278 if (mono_arch_is_soft_float ())
/* gsharedvt variables are address-taken later; keep them as variables. */
14282 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14286 /* Arguments are implicitly global */
14287 /* Putting R4 vars into registers doesn't work currently */
14288 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14289 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14291 * Make that the variable's liveness interval doesn't contain a call, since
14292 * that would cause the lvreg to be spilled, making the whole optimization
14295 /* This is too slow for JIT compilation */
14297 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14299 int def_index, call_index, ins_index;
14300 gboolean spilled = FALSE;
14305 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14306 const char *spec = INS_INFO (ins->opcode);
14308 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14309 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * presumably was meant to check SRC2/sreg2, so uses through the second
 * source register are missed here. Looks like a copy-paste bug; being
 * conservative it can only cause a missed optimization, not wrong code
 * — confirm against upstream. */
14311 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14312 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14313 if (call_index > def_index) {
14319 if (MONO_IS_CALL (ins))
14320 call_index = ins_index;
14330 if (G_UNLIKELY (cfg->verbose_level > 2))
14331 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and forget the vreg->var mapping. */
14332 var->flags |= MONO_INST_IS_DEAD;
14333 cfg->vreg_to_inst [var->dreg] = NULL;
14340 * Compress the varinfo and vars tables so the liveness computation is faster and
14341 * takes up less space.
14344 for (i = 0; i < cfg->num_varinfo; ++i) {
14345 MonoInst *var = cfg->varinfo [i];
14346 if (pos < i && cfg->locals_start == i)
14347 cfg->locals_start = pos;
14348 if (!(var->flags & MONO_INST_IS_DEAD)) {
14350 cfg->varinfo [pos] = cfg->varinfo [i];
14351 cfg->varinfo [pos]->inst_c0 = pos;
14352 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14353 cfg->vars [pos].idx = pos;
14354 #if SIZEOF_REGISTER == 4
14355 if (cfg->varinfo [pos]->type == STACK_I8) {
14356 /* Modify the two component vars too */
/* Keep the lo/hi halves' back-pointers in sync with the new index. */
14359 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14360 var1->inst_c0 = pos;
14361 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14362 var1->inst_c0 = pos;
14369 cfg->num_varinfo = pos;
14370 if (cfg->locals_start > cfg->num_varinfo)
14371 cfg->locals_start = cfg->num_varinfo;
14375 * mono_allocate_gsharedvt_vars:
14377 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14378 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * Encoding of cfg->gsharedvt_vreg_to_idx (checked in mono_spill_global_vars):
 *   0       -> vreg is not a gsharedvt variable (array is zero-initialized)
 *   idx + 1 -> local: slot idx in the runtime-info entries array
 *   -1      -> argument passed by reference
 */
14381 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14385 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14387 for (i = 0; i < cfg->num_varinfo; ++i) {
14388 MonoInst *ins = cfg->varinfo [i];
14391 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Indexes >= locals_start are locals; below that are arguments. */
14392 if (i >= cfg->locals_start) {
14394 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* +1 bias so 0 can mean "not gsharedvt" in the mapping array. */
14395 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14396 ins->opcode = OP_GSHAREDVT_LOCAL;
14397 ins->inst_imm = idx;
/* Arguments: marked -1, addressed later via their reg+offset. */
14400 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14401 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14408 * mono_spill_global_vars:
14410 * Generate spill code for variables which are not allocated to registers,
14411 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14412 * code is generated which could be optimized by the local optimization passes.
14415 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14417 MonoBasicBlock *bb;
14419 int orig_next_vreg;
14420 guint32 *vreg_to_lvreg;
14422 guint32 i, lvregs_len;
14423 gboolean dest_has_lvreg = FALSE;
14424 MonoStackType stacktypes [128];
14425 MonoInst **live_range_start, **live_range_end;
14426 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14428 *need_local_opts = FALSE;
14430 memset (spec2, 0, sizeof (spec2));
14432 /* FIXME: Move this function to mini.c */
14433 stacktypes ['i'] = STACK_PTR;
14434 stacktypes ['l'] = STACK_I8;
14435 stacktypes ['f'] = STACK_R8;
14436 #ifdef MONO_ARCH_SIMD_INTRINSICS
14437 stacktypes ['x'] = STACK_VTYPE;
14440 #if SIZEOF_REGISTER == 4
14441 /* Create MonoInsts for longs */
14442 for (i = 0; i < cfg->num_varinfo; i++) {
14443 MonoInst *ins = cfg->varinfo [i];
14445 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14446 switch (ins->type) {
14451 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14454 g_assert (ins->opcode == OP_REGOFFSET);
14456 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14458 tree->opcode = OP_REGOFFSET;
14459 tree->inst_basereg = ins->inst_basereg;
14460 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14462 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14464 tree->opcode = OP_REGOFFSET;
14465 tree->inst_basereg = ins->inst_basereg;
14466 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14476 if (cfg->compute_gc_maps) {
14477 /* registers need liveness info even for !non refs */
14478 for (i = 0; i < cfg->num_varinfo; i++) {
14479 MonoInst *ins = cfg->varinfo [i];
14481 if (ins->opcode == OP_REGVAR)
14482 ins->flags |= MONO_INST_GC_TRACK;
14486 /* FIXME: widening and truncation */
14489 * As an optimization, when a variable allocated to the stack is first loaded into
14490 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14491 * the variable again.
14493 orig_next_vreg = cfg->next_vreg;
14494 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14495 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14499 * These arrays contain the first and last instructions accessing a given
14501 * Since we emit bblocks in the same order we process them here, and we
14502 * don't split live ranges, these will precisely describe the live range of
14503 * the variable, i.e. the instruction range where a valid value can be found
14504 * in the variables location.
14505 * The live range is computed using the liveness info computed by the liveness pass.
14506 * We can't use vmv->range, since that is an abstract live range, and we need
14507 * one which is instruction precise.
14508 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14510 /* FIXME: Only do this if debugging info is requested */
14511 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14512 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14513 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14514 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14516 /* Add spill loads/stores */
14517 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14520 if (cfg->verbose_level > 2)
14521 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14523 /* Clear vreg_to_lvreg array */
14524 for (i = 0; i < lvregs_len; i++)
14525 vreg_to_lvreg [lvregs [i]] = 0;
14529 MONO_BB_FOR_EACH_INS (bb, ins) {
14530 const char *spec = INS_INFO (ins->opcode);
14531 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14532 gboolean store, no_lvreg;
14533 int sregs [MONO_MAX_SRC_REGS];
14535 if (G_UNLIKELY (cfg->verbose_level > 2))
14536 mono_print_ins (ins);
14538 if (ins->opcode == OP_NOP)
14542 * We handle LDADDR here as well, since it can only be decomposed
14543 * when variable addresses are known.
14545 if (ins->opcode == OP_LDADDR) {
14546 MonoInst *var = (MonoInst *)ins->inst_p0;
14548 if (var->opcode == OP_VTARG_ADDR) {
14549 /* Happens on SPARC/S390 where vtypes are passed by reference */
14550 MonoInst *vtaddr = var->inst_left;
14551 if (vtaddr->opcode == OP_REGVAR) {
14552 ins->opcode = OP_MOVE;
14553 ins->sreg1 = vtaddr->dreg;
14555 else if (var->inst_left->opcode == OP_REGOFFSET) {
14556 ins->opcode = OP_LOAD_MEMBASE;
14557 ins->inst_basereg = vtaddr->inst_basereg;
14558 ins->inst_offset = vtaddr->inst_offset;
14561 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14562 /* gsharedvt arg passed by ref */
14563 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14565 ins->opcode = OP_LOAD_MEMBASE;
14566 ins->inst_basereg = var->inst_basereg;
14567 ins->inst_offset = var->inst_offset;
14568 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14569 MonoInst *load, *load2, *load3;
14570 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14571 int reg1, reg2, reg3;
14572 MonoInst *info_var = cfg->gsharedvt_info_var;
14573 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14577 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14580 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14582 g_assert (info_var);
14583 g_assert (locals_var);
14585 /* Mark the instruction used to compute the locals var as used */
14586 cfg->gsharedvt_locals_var_ins = NULL;
14588 /* Load the offset */
14589 if (info_var->opcode == OP_REGOFFSET) {
14590 reg1 = alloc_ireg (cfg);
14591 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14592 } else if (info_var->opcode == OP_REGVAR) {
14594 reg1 = info_var->dreg;
14596 g_assert_not_reached ();
14598 reg2 = alloc_ireg (cfg);
14599 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14600 /* Load the locals area address */
14601 reg3 = alloc_ireg (cfg);
14602 if (locals_var->opcode == OP_REGOFFSET) {
14603 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14604 } else if (locals_var->opcode == OP_REGVAR) {
14605 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14607 g_assert_not_reached ();
14609 /* Compute the address */
14610 ins->opcode = OP_PADD;
14614 mono_bblock_insert_before_ins (bb, ins, load3);
14615 mono_bblock_insert_before_ins (bb, load3, load2);
14617 mono_bblock_insert_before_ins (bb, load2, load);
14619 g_assert (var->opcode == OP_REGOFFSET);
14621 ins->opcode = OP_ADD_IMM;
14622 ins->sreg1 = var->inst_basereg;
14623 ins->inst_imm = var->inst_offset;
14626 *need_local_opts = TRUE;
14627 spec = INS_INFO (ins->opcode);
14630 if (ins->opcode < MONO_CEE_LAST) {
14631 mono_print_ins (ins);
14632 g_assert_not_reached ();
14636 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14640 if (MONO_IS_STORE_MEMBASE (ins)) {
14641 tmp_reg = ins->dreg;
14642 ins->dreg = ins->sreg2;
14643 ins->sreg2 = tmp_reg;
14646 spec2 [MONO_INST_DEST] = ' ';
14647 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14648 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14649 spec2 [MONO_INST_SRC3] = ' ';
14651 } else if (MONO_IS_STORE_MEMINDEX (ins))
14652 g_assert_not_reached ();
14657 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14658 printf ("\t %.3s %d", spec, ins->dreg);
14659 num_sregs = mono_inst_get_src_registers (ins, sregs);
14660 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14661 printf (" %d", sregs [srcindex]);
14668 regtype = spec [MONO_INST_DEST];
14669 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14672 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14673 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14674 MonoInst *store_ins;
14676 MonoInst *def_ins = ins;
14677 int dreg = ins->dreg; /* The original vreg */
14679 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14681 if (var->opcode == OP_REGVAR) {
14682 ins->dreg = var->dreg;
14683 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14685 * Instead of emitting a load+store, use a _membase opcode.
14687 g_assert (var->opcode == OP_REGOFFSET);
14688 if (ins->opcode == OP_MOVE) {
14692 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14693 ins->inst_basereg = var->inst_basereg;
14694 ins->inst_offset = var->inst_offset;
14697 spec = INS_INFO (ins->opcode);
14701 g_assert (var->opcode == OP_REGOFFSET);
14703 prev_dreg = ins->dreg;
14705 /* Invalidate any previous lvreg for this vreg */
14706 vreg_to_lvreg [ins->dreg] = 0;
14710 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14712 store_opcode = OP_STOREI8_MEMBASE_REG;
14715 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14717 #if SIZEOF_REGISTER != 8
14718 if (regtype == 'l') {
14719 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14720 mono_bblock_insert_after_ins (bb, ins, store_ins);
14721 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14722 mono_bblock_insert_after_ins (bb, ins, store_ins);
14723 def_ins = store_ins;
14728 g_assert (store_opcode != OP_STOREV_MEMBASE);
14730 /* Try to fuse the store into the instruction itself */
14731 /* FIXME: Add more instructions */
14732 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14733 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14734 ins->inst_imm = ins->inst_c0;
14735 ins->inst_destbasereg = var->inst_basereg;
14736 ins->inst_offset = var->inst_offset;
14737 spec = INS_INFO (ins->opcode);
14738 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14739 ins->opcode = store_opcode;
14740 ins->inst_destbasereg = var->inst_basereg;
14741 ins->inst_offset = var->inst_offset;
14745 tmp_reg = ins->dreg;
14746 ins->dreg = ins->sreg2;
14747 ins->sreg2 = tmp_reg;
14750 spec2 [MONO_INST_DEST] = ' ';
14751 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14752 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14753 spec2 [MONO_INST_SRC3] = ' ';
14755 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14756 // FIXME: The backends expect the base reg to be in inst_basereg
14757 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14759 ins->inst_basereg = var->inst_basereg;
14760 ins->inst_offset = var->inst_offset;
14761 spec = INS_INFO (ins->opcode);
14763 /* printf ("INS: "); mono_print_ins (ins); */
14764 /* Create a store instruction */
14765 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14767 /* Insert it after the instruction */
14768 mono_bblock_insert_after_ins (bb, ins, store_ins);
14770 def_ins = store_ins;
14773 * We can't assign ins->dreg to var->dreg here, since the
14774 * sregs could use it. So set a flag, and do it after
14777 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14778 dest_has_lvreg = TRUE;
14783 if (def_ins && !live_range_start [dreg]) {
14784 live_range_start [dreg] = def_ins;
14785 live_range_start_bb [dreg] = bb;
14788 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14791 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14792 tmp->inst_c1 = dreg;
14793 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14800 num_sregs = mono_inst_get_src_registers (ins, sregs);
14801 for (srcindex = 0; srcindex < 3; ++srcindex) {
14802 regtype = spec [MONO_INST_SRC1 + srcindex];
14803 sreg = sregs [srcindex];
14805 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14806 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14807 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14808 MonoInst *use_ins = ins;
14809 MonoInst *load_ins;
14810 guint32 load_opcode;
14812 if (var->opcode == OP_REGVAR) {
14813 sregs [srcindex] = var->dreg;
14814 //mono_inst_set_src_registers (ins, sregs);
14815 live_range_end [sreg] = use_ins;
14816 live_range_end_bb [sreg] = bb;
14818 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14821 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14822 /* var->dreg is a hreg */
14823 tmp->inst_c1 = sreg;
14824 mono_bblock_insert_after_ins (bb, ins, tmp);
14830 g_assert (var->opcode == OP_REGOFFSET);
14832 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14834 g_assert (load_opcode != OP_LOADV_MEMBASE);
14836 if (vreg_to_lvreg [sreg]) {
14837 g_assert (vreg_to_lvreg [sreg] != -1);
14839 /* The variable is already loaded to an lvreg */
14840 if (G_UNLIKELY (cfg->verbose_level > 2))
14841 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14842 sregs [srcindex] = vreg_to_lvreg [sreg];
14843 //mono_inst_set_src_registers (ins, sregs);
14847 /* Try to fuse the load into the instruction */
14848 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14849 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14850 sregs [0] = var->inst_basereg;
14851 //mono_inst_set_src_registers (ins, sregs);
14852 ins->inst_offset = var->inst_offset;
14853 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14854 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14855 sregs [1] = var->inst_basereg;
14856 //mono_inst_set_src_registers (ins, sregs);
14857 ins->inst_offset = var->inst_offset;
14859 if (MONO_IS_REAL_MOVE (ins)) {
14860 ins->opcode = OP_NOP;
14863 //printf ("%d ", srcindex); mono_print_ins (ins);
14865 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14867 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14868 if (var->dreg == prev_dreg) {
14870 * sreg refers to the value loaded by the load
14871 * emitted below, but we need to use ins->dreg
14872 * since it refers to the store emitted earlier.
14876 g_assert (sreg != -1);
14877 vreg_to_lvreg [var->dreg] = sreg;
14878 g_assert (lvregs_len < 1024);
14879 lvregs [lvregs_len ++] = var->dreg;
14883 sregs [srcindex] = sreg;
14884 //mono_inst_set_src_registers (ins, sregs);
14886 #if SIZEOF_REGISTER != 8
14887 if (regtype == 'l') {
14888 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14889 mono_bblock_insert_before_ins (bb, ins, load_ins);
14890 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14891 mono_bblock_insert_before_ins (bb, ins, load_ins);
14892 use_ins = load_ins;
14897 #if SIZEOF_REGISTER == 4
14898 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14900 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14901 mono_bblock_insert_before_ins (bb, ins, load_ins);
14902 use_ins = load_ins;
14906 if (var->dreg < orig_next_vreg) {
14907 live_range_end [var->dreg] = use_ins;
14908 live_range_end_bb [var->dreg] = bb;
14911 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14914 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14915 tmp->inst_c1 = var->dreg;
14916 mono_bblock_insert_after_ins (bb, ins, tmp);
14920 mono_inst_set_src_registers (ins, sregs);
14922 if (dest_has_lvreg) {
14923 g_assert (ins->dreg != -1);
14924 vreg_to_lvreg [prev_dreg] = ins->dreg;
14925 g_assert (lvregs_len < 1024);
14926 lvregs [lvregs_len ++] = prev_dreg;
14927 dest_has_lvreg = FALSE;
14931 tmp_reg = ins->dreg;
14932 ins->dreg = ins->sreg2;
14933 ins->sreg2 = tmp_reg;
14936 if (MONO_IS_CALL (ins)) {
14937 /* Clear vreg_to_lvreg array */
14938 for (i = 0; i < lvregs_len; i++)
14939 vreg_to_lvreg [lvregs [i]] = 0;
14941 } else if (ins->opcode == OP_NOP) {
14943 MONO_INST_NULLIFY_SREGS (ins);
14946 if (cfg->verbose_level > 2)
14947 mono_print_ins_index (1, ins);
14950 /* Extend the live range based on the liveness info */
14951 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14952 for (i = 0; i < cfg->num_varinfo; i ++) {
14953 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14955 if (vreg_is_volatile (cfg, vi->vreg))
14956 /* The liveness info is incomplete */
14959 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14960 /* Live from at least the first ins of this bb */
14961 live_range_start [vi->vreg] = bb->code;
14962 live_range_start_bb [vi->vreg] = bb;
14965 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14966 /* Live at least until the last ins of this bb */
14967 live_range_end [vi->vreg] = bb->last_ins;
14968 live_range_end_bb [vi->vreg] = bb;
14975 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14976 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14978 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14979 for (i = 0; i < cfg->num_varinfo; ++i) {
14980 int vreg = MONO_VARINFO (cfg, i)->vreg;
14983 if (live_range_start [vreg]) {
14984 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14986 ins->inst_c1 = vreg;
14987 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14989 if (live_range_end [vreg]) {
14990 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14992 ins->inst_c1 = vreg;
14993 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14994 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14996 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15001 if (cfg->gsharedvt_locals_var_ins) {
15002 /* Nullify if unused */
15003 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15004 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15007 g_free (live_range_start);
15008 g_free (live_range_end);
15009 g_free (live_range_start_bb);
15010 g_free (live_range_end_bb);
15015 * - use 'iadd' instead of 'int_add'
15016 * - handling ovf opcodes: decompose in method_to_ir.
15017 * - unify iregs/fregs
15018 * -> partly done, the missing parts are:
15019 * - a more complete unification would involve unifying the hregs as well, so
15020 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15021 * would no longer map to the machine hregs, so the code generators would need to
15022 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15023 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15024 * fp/non-fp branches speeds it up by about 15%.
15025 * - use sext/zext opcodes instead of shifts
15027 * - get rid of TEMPLOADs if possible and use vregs instead
15028 * - clean up usage of OP_P/OP_ opcodes
15029 * - cleanup usage of DUMMY_USE
15030 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15032 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15033 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15034 * - make sure handle_stack_args () is called before the branch is emitted
15035 * - when the new IR is done, get rid of all unused stuff
15036 * - COMPARE/BEQ as separate instructions or unify them ?
15037 * - keeping them separate allows specialized compare instructions like
15038 * compare_imm, compare_membase
15039 * - most back ends unify fp compare+branch, fp compare+ceq
15040 * - integrate mono_save_args into inline_method
15041 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15042 * - handle long shift opts on 32 bit platforms somehow: they require
15043 * 3 sregs (2 for arg1 and 1 for arg2)
15044 * - make byref a 'normal' type.
15045 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15046 * variable if needed.
15047 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15048 * like inline_method.
15049 * - remove inlining restrictions
15050 * - fix LNEG and enable cfold of INEG
15051 * - generalize x86 optimizations like ldelema as a peephole optimization
15052 * - add store_mem_imm for amd64
15053 * - optimize the loading of the interruption flag in the managed->native wrappers
15054 * - avoid special handling of OP_NOP in passes
15055 * - move code inserting instructions into one function/macro.
15056 * - try a coalescing phase after liveness analysis
15057 * - add float -> vreg conversion + local optimizations on !x86
15058 * - figure out how to handle decomposed branches during optimizations, ie.
15059 * compare+branch, op_jump_table+op_br etc.
15060 * - promote RuntimeXHandles to vregs
15061 * - vtype cleanups:
15062 * - add a NEW_VARLOADA_VREG macro
15063 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15064 * accessing vtype fields.
15065 * - get rid of I8CONST on 64 bit platforms
15066 * - dealing with the increase in code size due to branches created during opcode
15068 * - use extended basic blocks
15069 * - all parts of the JIT
15070 * - handle_global_vregs () && local regalloc
15071 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15072 * - sources of increase in code size:
15075 * - isinst and castclass
15076 * - lvregs not allocated to global registers even if used multiple times
15077 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15079 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15080 * - add all micro optimizations from the old JIT
15081 * - put tree optimizations into the deadce pass
15082 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15083 * specific function.
15084 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15085 * fcompare + branchCC.
15086 * - create a helper function for allocating a stack slot, taking into account
15087 * MONO_CFG_HAS_SPILLUP.
15089 * - merge the ia64 switch changes.
15090 * - optimize mono_regstate2_alloc_int/float.
15091 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15092 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15093 * parts of the tree could be separated by other instructions, killing the tree
15094 * arguments, or stores killing loads etc. Also, should we fold loads into other
15095 * instructions if the result of the load is used multiple times ?
15096 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15097 * - LAST MERGE: 108395.
15098 * - when returning vtypes in registers, generate IR and append it to the end of the
15099 * last bb instead of doing it in the epilog.
15100 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15108 - When to decompose opcodes:
15109 - earlier: this makes some optimizations hard to implement, since the low level IR
15110 no longer contains the necessary information. But it is easier to do.
15111 - later: harder to implement, enables more optimizations.
15112 - Branches inside bblocks:
15113 - created when decomposing complex opcodes.
15114 - branches to another bblock: harmless, but not tracked by the branch
15115 optimizations, so need to branch to a label at the start of the bblock.
15116 - branches to inside the same bblock: very problematic, trips up the local
15117 reg allocator. Can be fixed by splitting the current bblock, but that is a
15118 complex operation, since some local vregs can become global vregs etc.
15119 - Local/global vregs:
15120 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15121 local register allocator.
15122 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15123 structure, created by mono_create_var (). Assigned to hregs or the stack by
15124 the global register allocator.
15125 - When to do optimizations like alu->alu_imm:
15126 - earlier -> saves work later on since the IR will be smaller/simpler
15127 - later -> can work on more instructions
15128 - Handling of valuetypes:
15129 - When a vtype is pushed on the stack, a new temporary is created, an
15130 instruction computing its address (LDADDR) is emitted and pushed on
15131 the stack. Need to optimize cases when the vtype is used immediately as in
15132 argument passing, stloc etc.
15133 - Instead of the to_end stuff in the old JIT, simply call the function handling
15134 the values on the stack before emitting the last instruction of the bb.
15137 #endif /* DISABLE_JIT */