2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/monitor.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
70 #include "jit-icalls.h"
72 #include "debugger-agent.h"
73 #include "seq-points.h"
74 #include "aot-compiler.h"
75 #include "mini-llvm.h"
/*
 * NOTE(review): this whole region is a garbled extraction — each line carries its
 * original source line number, most lines (including the `} while (0)` terminators
 * of the macros below) are missing.  Original text kept verbatim; only comments added.
 */
/* Cost/limit constants used by the inliner and branch heuristics (values are heuristic tuning knobs). */
77 #define BRANCH_COST 10
78 #define INLINE_LENGTH_LIMIT 20
80 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current callee: record the reason and bail to exception_exit.
 * Only fires while actually inlining (cfg->method != cfg->current_method) a non-wrapper method. */
81 #define INLINE_FAILURE(msg) do { \
82 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
83 inline_failure (cfg, msg); \
84 goto exception_exit; \
/* Bail out if a previous step already recorded a compilation exception on cfg. */
87 #define CHECK_CFG_EXCEPTION do {\
88 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
89 goto exception_exit; \
/* Record a MethodAccessException-style failure and bail. */
91 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
92 method_access_failure ((cfg), (method), (cmethod)); \
93 goto exception_exit; \
/* Record a FieldAccessException-style failure and bail. */
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
/* Generic sharing cannot handle this opcode: fail the shared compile so the
 * method is recompiled as a full instantiation. Only acts when cfg->gshared. */
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
/* Same idea for gsharedvt (valuetype generic sharing). */
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
/* Mark the compile as failed with an OOM MonoError and bail. */
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
/* Disable AOT code generation for this method (logged when verbose). */
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
/* Generic type-load failure: break into the debugger hook, then record a TypeLoadException. */
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
/* Type-load failure with the offending class recorded in cfg->exception_ptr. */
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
/* Propagate a MonoError recorded on cfg into a compile failure.
 * NOTE: jumps to mono_error_exit, not exception_exit, unlike the macros above. */
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whenever 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: CIL load/store-indirect opcode -> membase opcode translators. */
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-form opcode mapping (noemul variant skips emulated opcodes). */
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Core inliner entry point (definition later in the file). */
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
/* NOTE(review): return-type line of this declaration was lost in extraction. */
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
/* Cached icall signatures, filled in by mono_create_helper_signatures (). */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
161 /* type loading helpers */
/* Generated cached lookups for corlib classes used repeatedly below. */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
166 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros are (re)defined before each include of mini-ops.h
 * so the same opcode list expands to different per-opcode tables. */
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
/* Initialize liveness bookkeeping of a variable; 0xffff == "not used yet". */
206 #define MONO_INIT_VARINFO(vi,id) do { \
207 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the cfg-local virtual-register allocators.
 * NOTE(review): return-type lines and braces were lost in extraction. */
/* Allocate an integer vreg. */
213 mono_alloc_ireg (MonoCompile *cfg)
215 return alloc_ireg (cfg);
/* Allocate a long (64-bit) vreg. */
219 mono_alloc_lreg (MonoCompile *cfg)
221 return alloc_lreg (cfg);
/* Allocate a floating-point vreg. */
225 mono_alloc_freg (MonoCompile *cfg)
227 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
231 mono_alloc_preg (MonoCompile *cfg)
233 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
237 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
239 return alloc_dreg (cfg, stack_type);
243 * mono_alloc_ireg_ref:
245 * Allocate an IREG, and mark it as holding a GC ref.
248 mono_alloc_ireg_ref (MonoCompile *cfg)
250 return alloc_ireg_ref (cfg);
254 * mono_alloc_ireg_mp:
256 * Allocate an IREG, and mark it as holding a managed pointer.
259 mono_alloc_ireg_mp (MonoCompile *cfg)
261 return alloc_ireg_mp (cfg);
265 * mono_alloc_ireg_copy:
267 * Allocate an IREG with the same GC type as VREG.
270 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
272 if (vreg_is_ref (cfg, vreg))
273 return alloc_ireg_ref (cfg);
274 else if (vreg_is_mp (cfg, vreg))
275 return alloc_ireg_mp (cfg);
277 return alloc_ireg (cfg);
/* Map a MonoType to the register-move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) used
 * to copy a value of that type between vregs.  Recurses after unwrapping enums
 * and generic instances.  NOTE(review): many case labels were lost in extraction. */
281 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
286 type = mini_get_underlying_type (type);
288 switch (type->type) {
301 case MONO_TYPE_FNPTR:
303 case MONO_TYPE_CLASS:
304 case MONO_TYPE_STRING:
305 case MONO_TYPE_OBJECT:
306 case MONO_TYPE_SZARRAY:
307 case MONO_TYPE_ARRAY:
311 #if SIZEOF_REGISTER == 8
/* r4 values live in float regs as R4 only when the backend supports r4fp. */
317 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
320 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying integral type. */
321 if (type->data.klass->enumtype) {
322 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-recognized value types get a dedicated move (result line lost in extraction). */
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 case MONO_TYPE_TYPEDBYREF:
330 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's underlying type. */
331 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only appear under generic sharing. */
335 g_assert (cfg->gshared);
336 if (mini_type_var_is_vt (type))
339 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
341 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: its in/out edges (block numbers and dfn order)
 * followed by every instruction in the block. */
347 mono_print_bb (MonoBasicBlock *bb, const char *msg)
352 printf ("\n%s %d: [IN: ", msg, bb->block_num);
353 for (i = 0; i < bb->in_count; ++i)
354 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
356 for (i = 0; i < bb->out_count; ++i)
357 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
359 for (tree = bb->code; tree; tree = tree->next)
360 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached icall helper signatures declared above. */
364 mono_create_helper_signatures (void)
366 helper_sig_domain_get = mono_create_icall_signature ("ptr");
367 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
368 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/* Debugger hook: trap here when --break-on-unverified is set, so unverifiable IL
 * can be caught at the point of detection.  MONO_NEVER_INLINE keeps it breakpointable. */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/* Record a MethodAccessException on the compile's MonoError.
 * Out of line (never inlined) to keep the hot IR-building paths small. */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
384 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/* Same as above for field accesses -> FieldAccessException. */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
395 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/* Mark the compile as an inline failure (callers then retry without inlining). */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/* Generic-sharing failure: log (verbose) and flag the compile so a
 * non-shared instantiation is compiled instead. */
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* NOTE(review): trailing '\' below looks like a leftover from when this code
 * lived in a macro — harmless (joins with the next line) but worth removing upstream. */
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* gsharedvt failure: also stores a formatted message on cfg->exception_message
 * (owner appears to be cfg; freed with the compile — TODO confirm). */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
/* Unverifiable IL encountered: under gsharedvt fall back to compiling the concrete
 * instantiation; otherwise trap into the debugger hook (non-gsharedvt tail lost in extraction). */
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL offset ip.
 * Relies on cfg->cil_offset_to_bb, and treats an out-of-range ip as unverifiable. */
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm.
 * Result reg is marked as a managed pointer (alloc_ireg_mp). */
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 /* Emit conversions so both operands of a binary opcode are of the same type */
/* Rewrites *arg1_ref/*arg2_ref in place: the narrower operand is replaced by a
 * conversion instruction's result so both sides agree.  Handles R4->R8 widening,
 * and (64-bit only) sign-extending an I4 operand paired with a PTR operand. */
464 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
466 MonoInst *arg1 = *arg1_ref;
467 MonoInst *arg2 = *arg2_ref;
470 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
471 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
474 /* Mixing r4/r8 is allowed by the spec */
475 if (arg1->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
479 conv->type = STACK_R8;
483 if (arg2->type == STACK_R4) {
484 int dreg = alloc_freg (cfg);
486 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
487 conv->type = STACK_R8;
493 #if SIZEOF_REGISTER == 8
494 /* FIXME: Need to add many more cases */
495 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
498 int dr = alloc_preg (cfg);
/* Sign-extend the 32-bit operand to pointer width and retarget ins's sreg2. */
499 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
500 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack entries (sp[0], sp[1]), type-check/specialize the opcode via
 * type_from_op, widen operands if needed, and push the (possibly decomposed) result. */
505 #define ADD_BINOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 ins->sreg2 = sp [1]->dreg; \
510 type_from_op (cfg, ins, sp [0], sp [1]); \
512 /* Have to insert a widening op */ \
513 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
514 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
515 MONO_ADD_INS ((cfg)->cbb, (ins)); \
516 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary variant of the above: one operand, one result. */
519 #define ADD_UNOP(op) do { \
520 MONO_INST_NEW (cfg, ins, (op)); \
522 ins->sreg1 = sp [0]->dreg; \
523 type_from_op (cfg, ins, sp [0], NULL); \
525 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Conditional branch on a two-operand compare: emits OP_COMPARE + branch,
 * wires up true/false target blocks, and flushes the eval stack across the edge.
 * start_new_bblock = 1 falls through to next_block, = 2 needs a block lookup at ip. */
530 #define ADD_BINCOND(next_block) do { \
533 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
534 cmp->sreg1 = sp [0]->dreg; \
535 cmp->sreg2 = sp [1]->dreg; \
536 type_from_op (cfg, cmp, sp [0], sp [1]); \
538 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
539 type_from_op (cfg, ins, sp [0], sp [1]); \
540 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
541 GET_BBLOCK (cfg, tblock, target); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_true_bb = tblock; \
544 if ((next_block)) { \
545 link_bblock (cfg, cfg->cbb, (next_block)); \
546 ins->inst_false_bb = (next_block); \
547 start_new_bblock = 1; \
549 GET_BBLOCK (cfg, tblock, ip); \
550 link_bblock (cfg, cfg->cbb, tblock); \
551 ins->inst_false_bb = tblock; \
552 start_new_bblock = 2; \
/* Spill live stack entries into interface variables before leaving the block. */
554 if (sp != stack_start) { \
555 handle_stack_args (cfg, stack_start, sp - stack_start); \
556 CHECK_UNVERIFIABLE (cfg); \
558 MONO_ADD_INS (cfg->cbb, cmp); \
559 MONO_ADD_INS (cfg->cbb, ins); \
563 * link_bblock: Links two basic blocks
565 * links two basic blocks in the control flow graph, the 'from'
566 * argument is the starting block and the 'to' argument is the block
567 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, growing each mempool-allocated
 * edge array by one; no-op if the edge already exists (duplicate checks below). */
570 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
572 MonoBasicBlock **newa;
/* Verbose tracing of the edge (entry/exit blocks have NULL cil_code). */
576 if (from->cil_code) {
578 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
580 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
583 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
585 printf ("edge from entry to exit\n");
/* Skip if the out-edge is already present. */
590 for (i = 0; i < from->out_count; ++i) {
591 if (to == from->out_bb [i]) {
597 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
598 for (i = 0; i < from->out_count; ++i) {
599 newa [i] = from->out_bb [i];
/* Mirror the same append on the 'to' block's in-edge list. */
607 for (i = 0; i < to->in_count; ++i) {
608 if (from == to->in_bb [i]) {
614 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
615 for (i = 0; i < to->in_count; ++i) {
616 newa [i] = to->in_bb [i];
/* Public wrapper for the static link_bblock above. */
625 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
627 link_bblock (cfg, from, to);
631 * mono_find_block_region:
633 * We mark each basic block with a region ID. We use that to avoid BB
634 * optimizations when blocks are in different regions.
637 * A region token that encodes where this region is, and information
638 * about the clause owner for this block.
640 * The region encodes the try/catch/filter clause that owns this block
641 * as well as the type. -1 is a special value that represents a block
642 * that is in none of try/catch/filter.
/* Region token layout: ((clause_index + 1) << 8) | region_kind | clause_flags. */
645 mono_find_block_region (MonoCompile *cfg, int offset)
647 MonoMethodHeader *header = cfg->header;
648 MonoExceptionClause *clause;
/* First pass: handler regions (filter/finally/fault/catch) take precedence. */
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
/* A filter region runs from filter_offset up to the handler start. */
653 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
654 (offset < (clause->handler_offset)))
655 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
657 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
658 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
659 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
660 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
661 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
663 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try regions. */
666 for (i = 0; i < header->num_clauses; ++i) {
667 clause = &header->clauses [i];
669 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
670 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList, caller frees) the exception clauses of kind 'type' that
 * a branch from ip to target would leave — i.e. clauses containing ip but not target.
 * Used e.g. to find the finally handlers a leave instruction must run. */
677 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
679 MonoMethodHeader *header = cfg->header;
680 MonoExceptionClause *clause;
684 for (i = 0; i < header->num_clauses; ++i) {
685 clause = &header->clauses [i];
686 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
687 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
688 if (clause->flags == type)
689 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer spill variable for an EH region, cached in
 * cfg->spvars keyed by region token.  Marked VOLATILE so it stays on the stack. */
696 mono_create_spvar_for_region (MonoCompile *cfg, int region)
700 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
704 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
705 /* prevent it from being register allocated */
706 var->flags |= MONO_INST_VOLATILE;
708 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at the given IL offset. */
712 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
714 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create variant of the above; same VOLATILE/stack-allocation treatment. */
718 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
722 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
726 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
727 /* prevent it from being register allocated */
728 var->flags |= MONO_INST_VOLATILE;
730 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
736 * Returns the type used in the eval stack when @type is loaded.
737 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from a MonoType.
 * NOTE(review): many case labels were lost in extraction; the visible
 * assignments show which STACK_* each surviving group maps to. */
740 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
744 type = mini_get_underlying_type (type);
745 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments are managed pointers on the eval stack. */
747 inst->type = STACK_MP;
752 switch (type->type) {
754 inst->type = STACK_INV;
762 inst->type = STACK_I4;
767 case MONO_TYPE_FNPTR:
768 inst->type = STACK_PTR;
770 case MONO_TYPE_CLASS:
771 case MONO_TYPE_STRING:
772 case MONO_TYPE_OBJECT:
773 case MONO_TYPE_SZARRAY:
774 case MONO_TYPE_ARRAY:
775 inst->type = STACK_OBJ;
779 inst->type = STACK_I8;
/* R4 stack type depends on backend r4 support (STACK_R4 or STACK_R8). */
782 inst->type = cfg->r4_stack_type;
785 inst->type = STACK_R8;
787 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying integral type. */
788 if (type->data.klass->enumtype) {
789 type = mono_class_enum_basetype (type->data.klass);
793 inst->type = STACK_VTYPE;
796 case MONO_TYPE_TYPEDBYREF:
797 inst->klass = mono_defaults.typed_reference_class;
798 inst->type = STACK_VTYPE;
800 case MONO_TYPE_GENERICINST:
801 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing; gsharedvt vars are vtypes. */
805 g_assert (cfg->gshared);
806 if (mini_is_gsharedvt_type (type)) {
807 g_assert (cfg->gsharedvt);
808 inst->type = STACK_VTYPE;
810 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
814 g_error ("unknown type 0x%02x in eval stack type", type->type);
819 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for numeric binops, indexed [src1->type][src2->type].
 * Row/column order follows the STACK_* enum; the R8 and R4 rows carry a 9th
 * entry for STACK_R4 (backends with r4 support). */
822 bin_num_table [STACK_MAX] [STACK_MAX] = {
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
828 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand stack type. */
836 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
839 /* reduce the size of this table */
/* Integer-only binops (and/or/xor/div/rem): no float or reference rows. */
841 bin_int_table [STACK_MAX] [STACK_MAX] = {
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero = allowed; values >1 encode
 * partially-verifiable combinations (see the & 1 test in type_from_op). */
853 bin_comp_table [STACK_MAX] [STACK_MAX] = {
854 /* Inv i L p F & O vt r4 */
856 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
857 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
858 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
859 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
860 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
861 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
862 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
863 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
866 /* reduce the size of this table */
/* Shift ops: result type follows the value operand (first index). */
868 shift_table [STACK_MAX] [STACK_MAX] = {
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
880 * Tables to map from the non-specific opcode to the matching
881 * type-specific opcode.
883 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode *delta* added to the generic CEE_/OP_ opcode,
 * indexed by stack type: 0, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4. */
885 binops_op_map [STACK_MAX] = {
886 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
889 /* handles from CEE_NEG to CEE_CONV_U8 */
891 unops_op_map [STACK_MAX] = {
892 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
895 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
897 ovfops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
901 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
903 ovf2ops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
907 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
909 ovf3ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
913 /* handles from CEE_BEQ to CEE_BLT_UN */
915 beqops_op_map [STACK_MAX] = {
916 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
919 /* handles from CEE_CEQ to CEE_CLT_UN */
921 ceqops_op_map [STACK_MAX] = {
922 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
926 * Sets ins->type (the type on the eval stack) according to the
927 * type of the opcode and the arguments to it.
928 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
930 * FIXME: this function sets ins->type unconditionally in some cases, but
931 * it should set it to invalid for some types (a conv.x on an object)
/* Core type checker/specializer: validates operand stack types against the
 * tables above and rewrites the generic opcode into the type-specific one
 * (usually by adding an *_op_map delta).  src2 is NULL for unary/imm forms. */
934 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
936 switch (ins->opcode) {
943 /* FIXME: check unverifiable args for STACK_MP */
/* Numeric binops (add/sub/mul/...): table lookup + opcode delta. */
944 ins->type = bin_num_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
952 ins->type = bin_int_table [src1->type] [src2->type];
953 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
958 ins->type = shift_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick the width/kind of compare from src1; pointer-sized
 * operands compare as longs on 64-bit targets. */
964 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
965 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
966 ins->opcode = OP_LCOMPARE;
967 else if (src1->type == STACK_R4)
968 ins->opcode = OP_RCOMPARE;
969 else if (src1->type == STACK_R8)
970 ins->opcode = OP_FCOMPARE;
972 ins->opcode = OP_ICOMPARE;
974 case OP_ICOMPARE_IMM:
/* src1 indexes both dimensions here: the other operand is an immediate,
 * so it is checked against src1's own type. */
975 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
976 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
977 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
989 ins->opcode += beqops_op_map [src1->type];
/* ceq: any comparable pair is valid. */
992 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
993 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only fully-verifiable combinations (& 1) allowed. */
999 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1000 ins->opcode += ceqops_op_map [src1->type];
/* neg. */
1004 ins->type = neg_table [src1->type];
1005 ins->opcode += unops_op_map [ins->type];
/* not: integer-ish operands keep their type, everything else is invalid. */
1008 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1009 ins->type = src1->type;
1011 ins->type = STACK_INV;
1012 ins->opcode += unops_op_map [ins->type];
/* conv to small ints -> I4 result. */
1018 ins->type = STACK_I4;
1019 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> R8. */
1022 ins->type = STACK_R8;
1023 switch (src1->type) {
1026 ins->opcode = OP_ICONV_TO_R_UN;
1029 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit-or-smaller results. */
1033 case CEE_CONV_OVF_I1:
1034 case CEE_CONV_OVF_U1:
1035 case CEE_CONV_OVF_I2:
1036 case CEE_CONV_OVF_U2:
1037 case CEE_CONV_OVF_I4:
1038 case CEE_CONV_OVF_U4:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf3ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I_UN:
1043 case CEE_CONV_OVF_U_UN:
1044 ins->type = STACK_PTR;
1045 ins->opcode += ovf2ops_op_map [src1->type];
1047 case CEE_CONV_OVF_I1_UN:
1048 case CEE_CONV_OVF_I2_UN:
1049 case CEE_CONV_OVF_I4_UN:
1050 case CEE_CONV_OVF_U1_UN:
1051 case CEE_CONV_OVF_U2_UN:
1052 case CEE_CONV_OVF_U4_UN:
1053 ins->type = STACK_I4;
1054 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is pointer-sized; source opcode depends on width/kind. */
1057 ins->type = STACK_PTR;
1058 switch (src1->type) {
1060 ins->opcode = OP_ICONV_TO_U;
1064 #if SIZEOF_VOID_P == 8
1065 ins->opcode = OP_LCONV_TO_U;
1067 ins->opcode = OP_MOVE;
1071 ins->opcode = OP_LCONV_TO_U;
1074 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit. */
1080 ins->type = STACK_I8;
1081 ins->opcode += unops_op_map [src1->type];
1083 case CEE_CONV_OVF_I8:
1084 case CEE_CONV_OVF_U8:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf3ops_op_map [src1->type];
1088 case CEE_CONV_OVF_U8_UN:
1089 case CEE_CONV_OVF_I8_UN:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1094 ins->type = cfg->r4_stack_type;
1095 ins->opcode += unops_op_map [src1->type];
1098 ins->type = STACK_R8;
1099 ins->opcode += unops_op_map [src1->type];
1102 ins->type = STACK_R8;
/* conv to native-int-width I4 results via overflow map. */
1106 ins->type = STACK_I4;
1107 ins->opcode += ovfops_op_map [src1->type];
1110 case CEE_CONV_OVF_I:
1111 case CEE_CONV_OVF_U:
1112 ins->type = STACK_PTR;
1113 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: valid only for non-float operand pairs. */
1116 case CEE_ADD_OVF_UN:
1118 case CEE_MUL_OVF_UN:
1120 case CEE_SUB_OVF_UN:
1121 ins->type = bin_num_table [src1->type] [src2->type];
1122 ins->opcode += ovfops_op_map [src1->type];
1123 if (ins->type == STACK_R8)
1124 ins->type = STACK_INV;
/* Loads: result type fixed by the load width. */
1126 case OP_LOAD_MEMBASE:
1127 ins->type = STACK_PTR;
1129 case OP_LOADI1_MEMBASE:
1130 case OP_LOADU1_MEMBASE:
1131 case OP_LOADI2_MEMBASE:
1132 case OP_LOADU2_MEMBASE:
1133 case OP_LOADI4_MEMBASE:
1134 case OP_LOADU4_MEMBASE:
1135 ins->type = STACK_PTR;
1137 case OP_LOADI8_MEMBASE:
1138 ins->type = STACK_I8;
1140 case OP_LOADR4_MEMBASE:
1141 ins->type = cfg->r4_stack_type;
1143 case OP_LOADR8_MEMBASE:
1144 ins->type = STACK_R8;
1147 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a generic klass for GC tracking. */
1151 if (ins->type == STACK_MP)
1152 ins->klass = mono_defaults.object_class;
/* ldind_type-style table: stack type produced per load kind (fragment). */
1157 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1163 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the actual argument instructions are compatible with the
 * callee signature (used by the inliner / intrinsics paths — TODO confirm caller). */
1168 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1173 switch (args->type) {
1183 for (i = 0; i < sig->param_count; ++i) {
1184 switch (args [i].type) {
/* Managed-pointer argument requires a byref parameter and vice versa. */
1188 if (!sig->params [i]->byref)
1192 if (sig->params [i]->byref)
1194 switch (sig->params [i]->type) {
1195 case MONO_TYPE_CLASS:
1196 case MONO_TYPE_STRING:
1197 case MONO_TYPE_OBJECT:
1198 case MONO_TYPE_SZARRAY:
1199 case MONO_TYPE_ARRAY:
/* Float stack entries must match an R4/R8 non-byref parameter. */
1206 if (sig->params [i]->byref)
1208 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1217 /*if (!param_table [args [i].type] [sig->params [i]->type])
1225 * When we need a pointer to the current domain many times in a method, we
1226 * call mono_domain_get() once and we store the result in a local variable.
1227 * This function returns the variable that represents the MonoDomain*.
/* Lazily create cfg->domainvar (an int-sized local caching MonoDomain*). */
1229 inline static MonoInst *
1230 mono_get_domainvar (MonoCompile *cfg)
1232 if (!cfg->domainvar)
1233 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1234 return cfg->domainvar;
1238 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create cfg->got_var; NULL unless AOT-compiling on a backend that needs it. */
1242 mono_get_got_var (MonoCompile *cfg)
1244 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1246 if (!cfg->got_var) {
1247 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 return cfg->got_var;
/* Lazily create cfg->rgctx_var, holding the runtime generic context for shared code. */
1253 mono_get_vtable_var (MonoCompile *cfg)
1255 g_assert (cfg->gshared);
1257 if (!cfg->rgctx_var) {
1258 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1259 /* force the var to be stack allocated */
1260 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1263 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map an eval-stack entry back to a MonoType,
 * using ins->klass for MP/VTYPE entries. */
1267 type_from_stack_type (MonoInst *ins) {
1268 switch (ins->type) {
1269 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1270 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1271 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1272 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1273 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP: byref form of the pointed-to class. */
1275 return &ins->klass->this_arg;
1276 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1277 case STACK_VTYPE: return &ins->klass->byval_arg;
1279 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType* to its evaluation-stack type (STACK_* constant); the inverse
 * of type_from_stack_type (). Enums are resolved first via
 * mono_type_get_underlying_type (). Several case bodies are elided in this view;
 * the visible fallthrough aborts via g_assert_not_reached ().
 */
1284 static G_GNUC_UNUSED int
1285 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1287 t = mono_type_get_underlying_type (t);
1299 case MONO_TYPE_FNPTR:
1301 case MONO_TYPE_CLASS:
1302 case MONO_TYPE_STRING:
1303 case MONO_TYPE_OBJECT:
1304 case MONO_TYPE_SZARRAY:
1305 case MONO_TYPE_ARRAY:
1311 return cfg->r4_stack_type;
1314 case MONO_TYPE_VALUETYPE:
1315 case MONO_TYPE_TYPEDBYREF:
1317 case MONO_TYPE_GENERICINST:
1318 if (mono_type_generic_inst_is_valuetype (t))
1324 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class accessed by a CIL ldelem.*/stelem.* OPCODE
 * (most case labels are elided in this view; LDELEM_REF/STELEM_REF map to
 * object). Aborts on unknown opcodes.
 */
1331 array_access_to_klass (int opcode)
1335 return mono_defaults.byte_class;
1337 return mono_defaults.uint16_class;
1340 return mono_defaults.int_class;
1343 return mono_defaults.sbyte_class;
1346 return mono_defaults.int16_class;
1349 return mono_defaults.int32_class;
1351 return mono_defaults.uint32_class;
1354 return mono_defaults.int64_class;
1357 return mono_defaults.single_class;
1360 return mono_defaults.double_class;
1361 case CEE_LDELEM_REF:
1362 case CEE_STELEM_REF:
1363 return mono_defaults.object_class;
1365 g_assert_not_reached ();
1371 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable for stack SLOT holding a value of INS's stack type,
 * reusing a previously created one (cached in cfg->intvars, keyed by
 * (stack type, slot)) when possible. Slots beyond the method's declared
 * max_stack (possible due to inlining) always get a fresh variable.
 */
1374 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1379 /* inlining can result in deeper stacks */
1380 if (slot >= cfg->header->max_stack)
1381 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 pos = ins->type - 1 + slot * STACK_MAX;
1385 switch (ins->type) {
1392 if ((vnum = cfg->intvars [pos]))
1393 return cfg->varinfo [vnum];
1394 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1395 cfg->intvars [pos] = res->inst_c0;
1398 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   During AOT compilation, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash so the AOT compiler can re-resolve it later.
 */
1404 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1407 * Don't use this if a generic_context is set, since that means AOT can't
1408 * look up the method using just the image+token.
1409 * table == 0 means this is a reference made from a wrapper.
1411 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1412 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1413 jump_info_token->image = image;
1414 jump_info_token->token = token;
1415 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1420 * This function is called to handle items that are left on the evaluation stack
1421 * at basic block boundaries. What happens is that we save the values to local variables
1422 * and we reload them later when first entering the target basic block (with the
1423 * handle_loaded_temps () function).
1424 * A single join point will use the same variables (stored in the array bb->out_stack or
1425 * bb->in_stack, if the basic block is before or after the join point).
1427 * This function needs to be called _before_ emitting the last instruction of
1428 * the bb (i.e. before emitting a branch).
1429 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT items in SP (the evaluation stack of the current bblock) to
 * local variables at a basic-block boundary; successors reload them from the
 * shared in_stack variables. See the comment above for the full contract.
 * Sets cfg->unverifiable when successor stack depths disagree.
 */
1432 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1435 MonoBasicBlock *bb = cfg->cbb;
1436 MonoBasicBlock *outb;
1437 MonoInst *inst, **locals;
1442 if (cfg->verbose_level > 3)
1443 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1444 if (!bb->out_scount) {
1445 bb->out_scount = count;
1446 //printf ("bblock %d has out:", bb->block_num);
/* Try to share an existing successor's in_stack as our out_stack. */
1448 for (i = 0; i < bb->out_count; ++i) {
1449 outb = bb->out_bb [i];
1450 /* exception handlers are linked, but they should not be considered for stack args */
1451 if (outb->flags & BB_EXCEPTION_HANDLER)
1453 //printf (" %d", outb->block_num);
1454 if (outb->in_stack) {
1456 bb->out_stack = outb->in_stack;
1462 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1463 for (i = 0; i < count; ++i) {
1465 * try to reuse temps already allocated for this purpose, if they occupy the same
1466 * stack slot and if they are of the same type.
1467 * This won't cause conflicts since if 'local' is used to
1468 * store one of the values in the in_stack of a bblock, then
1469 * the same variable will be used for the same outgoing stack
1471 * This doesn't work when inlining methods, since the bblocks
1472 * in the inlined methods do not inherit their in_stack from
1473 * the bblock they are inlined to. See bug #58863 for an
1476 if (cfg->inlined_method)
1477 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1479 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every (non-handler) successor. */
1484 for (i = 0; i < bb->out_count; ++i) {
1485 outb = bb->out_bb [i];
1486 /* exception handlers are linked, but they should not be considered for stack args */
1487 if (outb->flags & BB_EXCEPTION_HANDLER)
1489 if (outb->in_scount) {
1490 if (outb->in_scount != bb->out_scount) {
1491 cfg->unverifiable = TRUE;
1494 continue; /* check they are the same locals */
1496 outb->in_scount = count;
1497 outb->in_stack = bb->out_stack;
1500 locals = bb->out_stack;
/* Store each stack item into its temp and replace it on the stack by the temp. */
1502 for (i = 0; i < count; ++i) {
1503 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1504 inst->cil_code = sp [i]->cil_code;
1505 sp [i] = locals [i];
1506 if (cfg->verbose_level > 3)
1507 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1511 * It is possible that the out bblocks already have in_stack assigned, and
1512 * the in_stacks differ. In this case, we will store to all the different
1519 /* Find a bblock which has a different in_stack */
1521 while (bindex < bb->out_count) {
1522 outb = bb->out_bb [bindex];
1523 /* exception handlers are linked, but they should not be considered for stack args */
1524 if (outb->flags & BB_EXCEPTION_HANDLER) {
1528 if (outb->in_stack != locals) {
1529 for (i = 0; i < count; ++i) {
1530 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1531 inst->cil_code = sp [i]->cil_code;
1532 sp [i] = locals [i];
1533 if (cfg->verbose_level > 3)
1534 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1536 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *
 *   Emit an instruction loading the runtime value described by (PATCH_TYPE, DATA):
 * an AOT constant when compiling AOT, otherwise the patch target is resolved
 * eagerly and emitted as a pointer constant.
 */
1546 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1550 if (cfg->compile_aot) {
1551 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1557 ji.type = patch_type;
1558 ji.data.target = data;
1559 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1560 mono_error_assert_ok (&error);
1562 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface bitmap
 * stored at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Three strategies: an icall when the bitmap is compressed; explicit bit
 * arithmetic with an AOT-loaded iid when compiling AOT; a direct byte load and
 * mask when the iid is known at compile time.
 */
1568 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1570 int ibitmap_reg = alloc_preg (cfg);
1571 #ifdef COMPRESSED_INTERFACE_BITMAP
1573 MonoInst *res, *ins;
1574 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1575 MONO_ADD_INS (cfg->cbb, ins);
1577 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1578 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1581 int ibitmap_byte_reg = alloc_preg (cfg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1585 if (cfg->compile_aot) {
/* iid unknown until runtime: compute bitmap [iid >> 3] & (1 << (iid & 7)) in IR. */
1586 int iid_reg = alloc_preg (cfg);
1587 int shifted_iid_reg = alloc_preg (cfg);
1588 int ibitmap_byte_address_reg = alloc_preg (cfg);
1589 int masked_iid_reg = alloc_preg (cfg);
1590 int iid_one_bit_reg = alloc_preg (cfg);
1591 int iid_bit_reg = alloc_preg (cfg);
1592 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1594 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1597 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1598 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* iid known at compile time: fold the byte offset and the bit mask into immediates. */
1601 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1608 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1609 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass's interface_bitmap (see comment above). */
1612 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1614 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1618 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1619 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable's interface_bitmap (see comment above). */
1622 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1624 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1628 * Emit code which checks whether the interface id of @klass is smaller
1629 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Emit IR comparing MAX_IID_REG against KLASS's interface id; on
 * max_iid < iid, either branch to FALSE_TARGET (when given) or throw
 * InvalidCastException. Under AOT the iid is loaded via an AOT constant.
 */
1632 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 if (cfg->compile_aot) {
1636 int iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1648 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id from the vtable in VTABLE_REG, then run the max-iid check. */
1650 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1651 MonoBasicBlock *false_target)
1653 int max_iid_reg = alloc_preg (cfg);
1655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1656 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1659 /* Same as above, but obtains max_iid from a klass */
/* Load max_interface_id from the class in KLASS_REG, then run the max-iid check. */
1661 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1662 MonoBasicBlock *false_target)
1664 int max_iid_reg = alloc_preg (cfg);
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1667 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style supertype check: look up entry (idepth - 1) of the
 * supertypes array of the class in KLASS_REG and compare it against KLASS
 * (given as an IR value KLASS_INS, an AOT class constant, or an immediate).
 * Branches to TRUE_TARGET on match; when klass->idepth exceeds the default
 * supertable size, an idepth guard first branches to FALSE_TARGET.
 */
1671 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1673 int idepth_reg = alloc_preg (cfg);
1674 int stypes_reg = alloc_preg (cfg);
1675 int stype = alloc_preg (cfg);
1677 mono_class_setup_supertypes (klass);
1679 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1687 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1688 } else if (cfg->compile_aot) {
1689 int const_reg = alloc_preg (cfg);
1690 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst supertype check without an explicit klass IR value. */
1699 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1701 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check on the vtable in VTABLE_REG: max-iid guard,
 * then interface-bitmap test. On success branches to TRUE_TARGET; on failure
 * (bit clear) throws InvalidCastException.
 */
1705 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 int intf_reg = alloc_preg (cfg);
1709 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1710 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1715 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1719 * Variant of the above that takes a register to the class, not the vtable.
/* Interface cast check like mini_emit_iface_cast (), but KLASS_REG holds a MonoClass*. */
1722 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1724 int intf_bit_reg = alloc_preg (cfg);
1726 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1727 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1732 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check: compare KLASS_REG against KLASS
 * (as the IR value KLASS_INST when given, otherwise as a runtime class
 * constant) and throw InvalidCastException on mismatch.
 */
1736 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1739 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1744 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass IR value. */
1748 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1750 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS (class constant under AOT, immediate
 * otherwise) and branch to TARGET using BRANCH_OP.
 */
1754 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1756 if (cfg->compile_aot) {
1757 int const_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1767 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check against the class in KLASS_REG, throwing
 * InvalidCastException on failure. The visible code handles the array case:
 * rank must match, then the element (cast_class) is checked recursively,
 * with special handling when the element type is object/enum/ValueType
 * (enum ↔ underlying-type array castability rules). For rank-1 SZARRAYs it
 * also verifies the object is a vector (bounds == NULL) unless OBJ_REG is -1.
 * The tail emits the supertype-table check for the non-array case.
 */
1770 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1773 int rank_reg = alloc_preg (cfg);
1774 int eclass_reg = alloc_preg (cfg);
1776 g_assert (!klass_inst);
1777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1780 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1782 if (klass->cast_class == mono_defaults.object_class) {
1783 int parent_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1785 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1786 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1787 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1788 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1789 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1790 } else if (klass->cast_class == mono_defaults.enum_class) {
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1793 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1795 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1796 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1799 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1800 /* Check that the object is a vector too */
1801 int bounds_reg = alloc_preg (cfg);
1802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1804 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertype-table membership check (same shape as isninst). */
1807 int idepth_reg = alloc_preg (cfg);
1808 int stypes_reg = alloc_preg (cfg);
1809 int stype = alloc_preg (cfg);
1811 mono_class_setup_supertypes (klass);
1813 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1816 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1820 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass IR value. */
1825 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1827 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR to set SIZE bytes at DESTREG+OFFSET to VAL (only VAL == 0 is
 * supported — asserted). Small aligned sizes become a single store-immediate;
 * otherwise the value is materialized in a register and stored in the widest
 * chunks the alignment (and, on 64-bit, unaligned-access support) allows,
 * falling down to 4-, 2- and 1-byte stores for the remainder.
 */
1831 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1835 g_assert (val == 0);
1840 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1849 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1851 #if SIZEOF_REGISTER == 8
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1859 val_reg = alloc_preg (cfg);
1861 if (SIZEOF_REGISTER == 8)
1862 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1864 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1867 /* This could be optimized further if necessary */
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1876 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET
 * as load/store pairs, using the widest chunk width the alignment (and, on
 * 64-bit, unaligned-access support) permits, then 4-, 2- and 1-byte tails.
 * SIZE is capped (asserted) to bound code expansion.
 */
1907 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1914 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1915 g_assert (size < 10000);
1918 /* This could be optimized further if necessary */
1920 cur_reg = alloc_preg (cfg);
1921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1929 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1941 cur_reg = alloc_preg (cfg);
1942 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the TLS slot TLS_KEY. Under AOT the offset is
 * loaded via a TLS-offset constant (OP_TLS_SET_REG); otherwise the offset is
 * resolved at compile time and embedded in an OP_TLS_SET instruction.
 */
1967 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1971 if (cfg->compile_aot) {
1972 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1973 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1975 ins->sreg2 = c->dreg;
1976 MONO_ADD_INS (cfg->cbb, ins);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1980 ins->inst_offset = mini_get_tls_offset (tls_key);
1981 MONO_ADD_INS (cfg->cbb, ins);
1988 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 *
 *   Emit IR to link cfg->lmf_var onto the thread's LMF stack. Two paths:
 * when the LMF lives directly in TLS, save the old TLS value into
 * previous_lmf and store the new LMF address via emit_tls_set (); otherwise
 * obtain lmf_addr (TLS intrinsic, inlined pthread_getspecific, or the
 * mono_get_lmf_addr icall), cache it in cfg->lmf_addr_var, save *lmf_addr
 * into previous_lmf and store the new LMF back through lmf_addr.
 */
1991 emit_push_lmf (MonoCompile *cfg)
1994 * Emit IR to push the LMF:
1995 * lmf_addr = <lmf_addr from tls>
1996 * lmf->lmf_addr = lmf_addr
1997 * lmf->prev_lmf = *lmf_addr
2000 int lmf_reg, prev_lmf_reg;
2001 MonoInst *ins, *lmf_ins;
2006 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2007 /* Load current lmf */
2008 lmf_ins = mono_get_lmf_intrinsic (cfg);
2010 MONO_ADD_INS (cfg->cbb, lmf_ins);
2011 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2012 lmf_reg = ins->dreg;
2013 /* Save previous_lmf */
2014 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2016 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2019 * Store lmf_addr in a variable, so it can be allocated to a global register.
2021 if (!cfg->lmf_addr_var)
2022 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2025 ins = mono_get_jit_tls_intrinsic (cfg);
2027 int jit_tls_dreg = ins->dreg;
2029 MONO_ADD_INS (cfg->cbb, ins);
2030 lmf_reg = alloc_preg (cfg);
2031 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2033 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2036 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2038 MONO_ADD_INS (cfg->cbb, lmf_ins);
2041 MonoInst *args [16], *jit_tls_ins, *ins;
2043 /* Inline mono_get_lmf_addr () */
2044 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2046 /* Load mono_jit_tls_id */
2047 if (cfg->compile_aot)
2048 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2050 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2051 /* call pthread_getspecific () */
2052 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2053 /* lmf_addr = &jit_tls->lmf */
2054 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2057 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2061 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2063 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2064 lmf_reg = ins->dreg;
2066 prev_lmf_reg = alloc_preg (cfg);
2067 /* Save previous_lmf */
2068 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2069 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2078 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 *
 *   Emit IR to unlink cfg->lmf_var from the thread's LMF stack: load
 * previous_lmf from the current LMF and either write it back to the TLS slot
 * (when the LMF lives in TLS) or store it through the cached lmf_addr
 * variable (*lmf_addr = lmf->previous_lmf).
 */
2081 emit_pop_lmf (MonoCompile *cfg)
2083 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2089 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2090 lmf_reg = ins->dreg;
2092 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2093 /* Load previous_lmf */
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2100 * Emit IR to pop the LMF:
2101 * *(lmf->lmf_addr) = lmf->prev_lmf
2103 /* This could be called before emit_push_lmf () */
2104 if (!cfg->lmf_addr_var)
2105 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2106 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2108 prev_lmf_reg = alloc_preg (cfg);
2109 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2110 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 *   Emit an icall to the profiler hook FUNC with the current method as its
 * argument, but only for the outermost (non-inlined) method and only when
 * enter/leave profiling is enabled.
 */
2115 emit_instrumentation_call (MonoCompile *cfg, void *func)
2117 MonoInst *iargs [1];
2120 * Avoid instrumenting inlined methods since it can
2121 * distort profiling results.
2123 if (cfg->method != cfg->current_method)
2126 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2127 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2128 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Pick the IR call opcode for a call returning TYPE: the *_REG variant when
 * CALLI is set, *_MEMBASE when VIRT is set, plain otherwise. Enums recurse on
 * their base type and generic instances on their container class. Aborts on
 * unknown types.
 */
2133 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2136 type = mini_get_underlying_type (type);
2137 switch (type->type) {
2138 case MONO_TYPE_VOID:
2139 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2146 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2150 case MONO_TYPE_FNPTR:
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_CLASS:
2153 case MONO_TYPE_STRING:
2154 case MONO_TYPE_OBJECT:
2155 case MONO_TYPE_SZARRAY:
2156 case MONO_TYPE_ARRAY:
2157 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2160 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2163 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2165 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2168 case MONO_TYPE_VALUETYPE:
2169 if (type->data.klass->enumtype) {
2170 type = mono_class_enum_basetype (type->data.klass);
2173 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2174 case MONO_TYPE_TYPEDBYREF:
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_GENERICINST:
2177 type = &type->data.generic_class->container_class->byval_arg;
2180 case MONO_TYPE_MVAR:
2182 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2184 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2190 * target_type_is_incompatible:
2191 * @cfg: MonoCompile context
2193 * Check that the item @arg on the evaluation stack can be stored
2194 * in the target type (can be a local, or field, etc).
2195 * The cfg arg can be used to check if we need verification or just
2198 * Returns: non-0 value if arg can't be stored on a target.
/*
 * See the comment above for the contract: returns nonzero when the stack item
 * ARG cannot be stored into a location of type TARGET. Byref targets accept
 * STACK_MP/STACK_PTR; the remaining cases compare the underlying type's
 * expected stack type (and, for value types, the exact class) against ARG.
 */
2201 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2203 MonoType *simple_type;
2206 if (target->byref) {
2207 /* FIXME: check that the pointed to types match */
2208 if (arg->type == STACK_MP) {
2209 MonoClass *base_class = mono_class_from_mono_type (target);
2210 /* This is needed to handle gshared types + ldaddr */
2211 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2212 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2214 if (arg->type == STACK_PTR)
2219 simple_type = mini_get_underlying_type (target);
2220 switch (simple_type->type) {
2221 case MONO_TYPE_VOID:
2229 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2233 /* STACK_MP is needed when setting pinned locals */
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2239 case MONO_TYPE_FNPTR:
2241 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2242 * in native int. (#688008).
2244 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_CLASS:
2248 case MONO_TYPE_STRING:
2249 case MONO_TYPE_OBJECT:
2250 case MONO_TYPE_SZARRAY:
2251 case MONO_TYPE_ARRAY:
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
2258 if (arg->type != STACK_I8)
2262 if (arg->type != cfg->r4_stack_type)
2266 if (arg->type != STACK_R8)
2269 case MONO_TYPE_VALUETYPE:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_TYPEDBYREF:
2277 if (arg->type != STACK_VTYPE)
2279 klass = mono_class_from_mono_type (simple_type);
2280 if (klass != arg->klass)
2283 case MONO_TYPE_GENERICINST:
2284 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2285 MonoClass *target_class;
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 target_class = mono_class_from_mono_type (target);
2290 /* The second cases is needed when doing partial sharing */
2291 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2295 if (arg->type != STACK_OBJ)
2297 /* FIXME: check type compatibility */
2301 case MONO_TYPE_MVAR:
2302 g_assert (cfg->gshared);
2303 if (mini_type_var_is_vt (simple_type)) {
2304 if (arg->type != STACK_VTYPE)
2307 if (arg->type != STACK_OBJ)
2312 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2318 * Prepare arguments for passing to a function call.
2319 * Return a non-zero value if the arguments can't be passed to the given
2321 * The type checks are not yet complete and some conversions may need
2322 * casts on 32 or 64 bit architectures.
2324 * FIXME: implement this using target_type_is_incompatible ()
/*
 * See the comment above: returns nonzero when ARGS cannot be passed to a call
 * with signature SIG. Each parameter's underlying type must match the stack
 * type of the corresponding argument (byref parameters take STACK_MP/PTR).
 */
2327 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2329 MonoType *simple_type;
2333 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2337 for (i = 0; i < sig->param_count; ++i) {
2338 if (sig->params [i]->byref) {
2339 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2343 simple_type = mini_get_underlying_type (sig->params [i]);
2345 switch (simple_type->type) {
2346 case MONO_TYPE_VOID:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2361 case MONO_TYPE_FNPTR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_STRING:
2367 case MONO_TYPE_OBJECT:
2368 case MONO_TYPE_SZARRAY:
2369 case MONO_TYPE_ARRAY:
2370 if (args [i]->type != STACK_OBJ)
2375 if (args [i]->type != STACK_I8)
2379 if (args [i]->type != cfg->r4_stack_type)
2383 if (args [i]->type != STACK_R8)
2386 case MONO_TYPE_VALUETYPE:
2387 if (simple_type->data.klass->enumtype) {
2388 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2391 if (args [i]->type != STACK_VTYPE)
2394 case MONO_TYPE_TYPEDBYREF:
2395 if (args [i]->type != STACK_VTYPE)
2398 case MONO_TYPE_GENERICINST:
2399 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2402 case MONO_TYPE_MVAR:
2404 if (args [i]->type != STACK_VTYPE)
2408 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *CALL_MEMBASE (virtual call) opcode to its direct-call counterpart
 * (return lines are elided in this view). Aborts on unknown opcodes.
 */
2416 callvirt_to_call (int opcode)
2419 case OP_CALL_MEMBASE:
2421 case OP_VOIDCALL_MEMBASE:
2423 case OP_FCALL_MEMBASE:
2425 case OP_RCALL_MEMBASE:
2427 case OP_VCALL_MEMBASE:
2429 case OP_LCALL_MEMBASE:
2432 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 *   Map a *CALL_MEMBASE (virtual call) opcode to its indirect-call (*_REG)
 * counterpart. Aborts on unknown opcodes.
 */
2439 callvirt_to_call_reg (int opcode)
2442 case OP_CALL_MEMBASE:
2444 case OP_VOIDCALL_MEMBASE:
2445 return OP_VOIDCALL_REG;
2446 case OP_FCALL_MEMBASE:
2447 return OP_FCALL_REG;
2448 case OP_RCALL_MEMBASE:
2449 return OP_RCALL_REG;
2450 case OP_VCALL_MEMBASE:
2451 return OP_VCALL_REG;
2452 case OP_LCALL_MEMBASE:
2453 return OP_LCALL_REG;
2455 g_assert_not_reached ();
2461 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Materialize the IMT argument for CALL — either the given IMT_ARG value or
 * a method constant for METHOD — and attach it to the call: as the LLVM
 * imt_arg_reg (plus an out-arg reg depending on the elided condition), or as
 * an out-arg in MONO_ARCH_IMT_REG for the JIT case.
 */
2463 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2467 if (COMPILE_LLVM (cfg)) {
2469 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2472 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2473 method_reg = ins->dreg;
2477 call->imt_arg_reg = method_reg;
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2484 method_reg = alloc_preg (cfg);
2485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2487 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2488 method_reg = ins->dreg;
2491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill
 *   in its target data. NOTE(review): the assignments of ji->ip/ji->type
 *   and the return are elided from this excerpt.
 */
2494 static MonoJumpInfo *
2495 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2497 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2501 ji->data.target = target;
/*
 * mini_class_check_context_used / mini_method_check_context_used:
 *   Thin cfg-aware wrappers around the metadata-level context-used checks.
 *   NOTE(review): the guard conditions (presumably short-circuiting when the
 *   compile is not shared) are elided in this excerpt.
 */
2507 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2510 return mono_class_check_context_used (klass);
2516 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2519 return mono_method_check_context_used (method);
2525 * check_method_sharing:
2527 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are returned through the optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX
 * pointers. A vtable is passed for static or valuetype methods of generic
 * classes when the method can be shared and has no method instantiation;
 * an mrgctx is passed instead when the method itself is generic (has a
 * method_inst) and is sharable, or when compiling gsharedvt.
 */
2530 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2532 gboolean pass_vtable = FALSE;
2533 gboolean pass_mrgctx = FALSE;
2535 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2536 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2537 gboolean sharable = FALSE;
2539 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
/* a generic method never takes the plain vtable path */
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2560 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2565 if (out_pass_vtable)
2566 *out_pass_vtable = pass_vtable;
2567 if (out_pass_mrgctx)
2568 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   selecting the opcode (tailcall vs. normal, by return type), setting up
 *   the vtype return buffer when needed, performing soft-float argument
 *   conversion, and letting the backend (LLVM or arch) emit the out-args.
 *   The instruction is NOT added to a bblock here; callers do that.
 */
2571 inline static MonoCallInst *
2572 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2573 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2577 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls leave the frame, so the leave-profiler event fires here */
2585 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2587 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2589 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2592 call->signature = sig;
2593 call->rgctx_reg = rgctx;
2594 sig_ret = mini_get_underlying_type (sig->ret);
2596 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): the guards distinguishing these two vtype-return branches
 * (presumably a tail-call / vret_addr condition) are elided in this view. */
2599 if (mini_type_is_vtype (sig_ret)) {
2600 call->vret_var = cfg->vret_addr;
2601 //g_assert_not_reached ();
2603 } else if (mini_type_is_vtype (sig_ret)) {
2604 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2607 temp->backend.is_pinvoke = sig->pinvoke;
2610 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2611 * address of return value to increase optimization opportunities.
2612 * Before vtype decomposition, the dreg of the call ins itself represents the
2613 * fact the call modifies the return value. After decomposition, the call will
2614 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2615 * will be transformed into an LDADDR.
2617 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2618 loada->dreg = alloc_preg (cfg);
2619 loada->inst_p0 = temp;
2620 /* We reference the call too since call->dreg could change during optimization */
2621 loada->inst_p1 = call;
2622 MONO_ADD_INS (cfg->cbb, loada);
2624 call->inst.dreg = temp->dreg;
2626 call->vret_var = loada;
2627 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2628 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2630 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2631 if (COMPILE_SOFT_FLOAT (cfg)) {
2633 * If the call has a float argument, we would need to do an r8->r4 conversion using
2634 * an icall, but that cannot be done during the call sequence since it would clobber
2635 * the call registers + the stack. So we do it before emitting the call.
2637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2639 MonoInst *in = call->args [i];
2641 if (i >= sig->hasthis)
2642 t = sig->params [i - sig->hasthis];
2644 t = &mono_defaults.int_class->byval_arg;
2645 t = mono_type_get_underlying_type (t);
2647 if (!t->byref && t->type == MONO_TYPE_R4) {
2648 MonoInst *iargs [1];
2652 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2654 /* The result will be in an int vreg */
2655 call->args [i] = conv;
2661 call->need_unbox_trampoline = unbox_trampoline;
2664 if (COMPILE_LLVM (cfg))
2665 mono_llvm_emit_call (cfg, call);
2667 mono_arch_emit_call (cfg, call);
/* NOTE(review): duplicated arch call; the #else/#endif separating these
 * lines is elided from this excerpt. */
2669 mono_arch_emit_call (cfg, call);
/* reserve enough outgoing-parameter area for the largest call seen */
2672 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2673 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument (already moved into RGCTX_REG) to CALL as an
 *   out-arg in MONO_ARCH_RGCTX_REG, and flag the cfg/call accordingly so
 *   later passes know the rgctx register is live across this call.
 */
2679 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2682 cfg->uses_rgctx_reg = TRUE;
2683 call->rgctx_reg = TRUE;
2685 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 *   an IMT argument and/or an rgctx argument. For pinvoke wrappers compiled
 *   with check_pinvoke_callconv, brackets the call with OP_GET_SP/OP_SET_SP
 *   so a callee stack imbalance (wrong calling convention) is detected and
 *   reported as ExecutionEngineException instead of crashing.
 */
2689 inline static MonoInst*
2690 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2695 gboolean check_sp = FALSE;
2697 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2698 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2700 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into a fresh preg before the out-arg setup */
2705 rgctx_reg = mono_alloc_preg (cfg);
2706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2710 if (!cfg->stack_inbalance_var)
2711 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* record SP before the call for the post-call comparison */
2713 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 ins->dreg = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2718 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2720 call->inst.sreg1 = addr->dreg;
2723 emit_imt_argument (cfg, call, NULL, imt_arg);
2725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2730 sp_reg = mono_alloc_preg (cfg);
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 MONO_ADD_INS (cfg->cbb, ins);
2736 /* Restore the stack so we don't crash when throwing the exception */
2737 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2738 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2739 MONO_ADD_INS (cfg->cbb, ins);
2741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2742 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2746 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2748 return (MonoInst*)call;
/* Forward declarations for rgctx/gsharedvt helpers defined later in this file. */
2752 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2755 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2757 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) call to METHOD with signature
 *   SIG. Handles remoting wrappers, llvm-only virtual dispatch, delegate
 *   Invoke fast paths, devirtualization of final/non-virtual methods, IMT
 *   interface dispatch and vtable-slot dispatch. Returns the call
 *   instruction (already added to the current bblock).
 *
 *   FIX(review): in the call_target branch below, `flags &= !FLAG` was
 *   changed to `flags &= ~FLAG`. Logical NOT of a nonzero flag constant is
 *   0, so the original cleared every instruction flag (including
 *   MONO_INST_HAS_METHOD set above AND any other flags) instead of clearing
 *   just the HAS_METHOD bit.
 */
2760 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2761 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2763 #ifndef DISABLE_REMOTING
2764 gboolean might_be_remote = FALSE;
2766 gboolean virtual_ = this_ins != NULL;
2767 gboolean enable_for_aot = TRUE;
2770 MonoInst *call_target = NULL;
2772 gboolean need_unbox_trampoline;
2775 sig = mono_method_signature (method);
/* llvm-only interface calls must have been lowered before reaching here */
2777 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2778 g_assert_not_reached ();
2781 rgctx_reg = mono_alloc_preg (cfg);
2782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2785 if (method->string_ctor) {
2786 /* Create the real signature */
2787 /* FIXME: Cache these */
2788 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2789 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2794 context_used = mini_method_check_context_used (cfg, method);
2796 #ifndef DISABLE_REMOTING
2797 might_be_remote = this_ins && sig->hasthis &&
2798 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2799 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2801 if (might_be_remote && context_used) {
2804 g_assert (cfg->gshared);
2806 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2808 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2812 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2813 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* object/interface methods may be called on a boxed valuetype receiver */
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this_ins;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this_ins->dreg;
2835 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2894 } else if (call_target) {
2895 vtable_reg = alloc_preg (cfg);
/* faulting load doubles as the null check on 'this' */
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2898 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2899 call->inst.sreg1 = call_target->dreg;
/* clear only the HAS_METHOD bit; '!' here would wipe all flags */
2900 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2902 vtable_reg = alloc_preg (cfg);
2903 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2904 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2905 guint32 imt_slot = mono_method_get_imt_slot (method);
2906 emit_imt_argument (cfg, call, call->method, imt_arg);
2907 slot_reg = vtable_reg;
/* IMT slots sit at negative offsets from the vtable pointer */
2908 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2910 slot_reg = vtable_reg;
2911 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2912 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2914 g_assert (mono_method_signature (method)->generic_param_count);
2915 emit_imt_argument (cfg, call, call->method, imt_arg);
2919 call->inst.sreg1 = slot_reg;
2920 call->inst.inst_offset = offset;
2921 call->is_virtual = TRUE;
2925 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2928 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2930 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 *   with no IMT or rgctx argument.
 */
2934 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2936 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call (call->fptr) is
 *   elided from this excerpt.
 */
2940 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2947 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2950 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2952 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the registered JIT icall FUNC, going through its
 *   exception-handling wrapper.
 */
2956 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2958 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2962 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2966 * mono_emit_abs_call:
2968 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; it is registered
 * in cfg->abs_patches so the PATCH_INFO_ABS resolution pass can map the
 * fake address back to the patch and emit the real target.
 */
2970 inline static MonoInst*
2971 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2972 MonoMethodSignature *sig, MonoInst **args)
2974 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2978 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2981 if (cfg->abs_patches == NULL)
2982 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2983 g_hash_table_insert (cfg->abs_patches, ji, ji);
2984 ins = mono_emit_native_call (cfg, ji, sig, args);
2985 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Build a copy of SIG with one extra trailing native-int parameter, used
 *   for calls that pass an rgctx/extra argument after the normal args.
 *   NOTE(review): the result is g_malloc'ed and (per the FIXME) never
 *   freed by this path — ownership/lifetime should be clarified.
 */
2989 static MonoMethodSignature*
2990 sig_to_rgctx_sig (MonoMethodSignature *sig)
2992 // FIXME: memory allocation
2993 MonoMethodSignature *res;
2996 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2997 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2998 res->param_count = sig->param_count + 1;
2999 for (i = 0; i < sig->param_count; ++i)
3000 res->params [i] = sig->params [i];
/* the extra arg is typed as a native int ('this_arg' of the int class) */
3001 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3005 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Rebuild the argument array from ORIG_ARGS, append the value in ARG_REG
 *   as a final extra argument, widen the signature accordingly via
 *   sig_to_rgctx_sig (), and emit an indirect call to CALL_TARGET.
 *   Uses a stack buffer for small arg counts, mempool otherwise.
 */
3007 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3009 MonoMethodSignature *csig;
3010 MonoInst *args_buf [16];
3012 int i, pindex, tmp_reg;
3014 /* Make a call with an rgctx/extra arg */
3015 if (fsig->param_count + 2 < 16)
3018 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* NOTE(review): 'this' copy below is presumably guarded by fsig->hasthis
 * on an elided line */
3021 args [pindex ++] = orig_args [0];
3022 for (i = 0; i < fsig->param_count; ++i)
3023 args [pindex ++] = orig_args [fsig->hasthis + i];
3024 tmp_reg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3026 csig = sig_to_rgctx_sig (fsig);
3027 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3030 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *   llvm-only mode: ADDR points to a two-word <function address, extra arg>
 *   descriptor. Load both words and call the function, passing the second
 *   word as the trailing extra argument (via emit_extra_arg_calli).
 */
3032 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3034 int addr_reg, arg_reg;
3035 MonoInst *call_target;
3037 g_assert (cfg->llvm_only);
3040 * addr points to a <addr, arg> pair, load both of them, and
3041 * make a call to addr, passing arg as an extra arg.
3043 addr_reg = alloc_preg (cfg);
3044 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3045 arg_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3048 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly, bypassing their wrappers.
 *   Disabled under LLVM on some targets, when single-stepping (sdb seq
 *   points), or when explicitly turned off. NOTE(review): the actual
 *   return statements are elided in this excerpt.
 */
3052 direct_icalls_enabled (MonoCompile *cfg)
3054 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3056 if (cfg->compile_llvm)
3059 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 *   and direct icalls are allowed, the (lazily created, cached) wrapper
 *   method is inlined instead of called, eliminating the wrapper overhead;
 *   otherwise fall back to a normal native call through the wrapper.
 */
3065 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3068 * Call the jit icall without a wrapper if possible.
3069 * The wrapper is needed for the following reasons:
3070 * - to handle exceptions thrown using mono_raise_exceptions () from the
3071 * icall function. The EH code needs the lmf frame pushed by the
3072 * wrapper to be able to unwind back to managed code.
3073 * - to be able to do stack walks for asynchronously suspended
3074 * threads when debugging.
3076 if (info->no_raise && direct_icalls_enabled (cfg)) {
3080 if (!info->wrapper_method) {
3081 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3082 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish wrapper_method before other threads can observe it */
3084 mono_memory_barrier ();
3088 * Inline the wrapper method, which is basically a call to the C icall, and
3089 * an exception check.
3091 costs = inline_method (cfg, info->wrapper_method, NULL,
3092 args, NULL, cfg->real_offset, TRUE);
3093 g_assert (costs > 0);
3094 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   After a pinvoke (or LLVM) call returning a small integer, emit an
 *   explicit sign/zero extension of the result, since native code may leave
 *   the upper bits of the return register uninitialized. Returns the
 *   (possibly replaced) result instruction.
 */
3103 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3105 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3106 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3110 * Native code might return non register sized integers
3111 * without initializing the upper bits.
3113 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3114 case OP_LOADI1_MEMBASE:
3115 widen_op = OP_ICONV_TO_I1;
3117 case OP_LOADU1_MEMBASE:
3118 widen_op = OP_ICONV_TO_U1;
3120 case OP_LOADI2_MEMBASE:
3121 widen_op = OP_ICONV_TO_I2;
3123 case OP_LOADU2_MEMBASE:
3124 widen_op = OP_ICONV_TO_U2;
3130 if (widen_op != -1) {
3131 int dreg = alloc_preg (cfg);
3134 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* preserve the eval-stack type of the original result */
3135 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache in a static) the managed String.memcpy(dst,src,n)
 *   helper from corlib; aborts if corlib is too old to provide it.
 */
3145 get_memcpy_method (void)
3147 static MonoMethod *memcpy_method = NULL;
3148 if (!memcpy_method) {
3149 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively build a bitmap (one bit per pointer-sized word, relative to
 *   OFFSET) marking which words of KLASS contain object references, so the
 *   caller can emit write barriers only where needed. Static fields are
 *   skipped; nested valuetypes with references recurse with an adjusted
 *   offset. Caller limits the type size so the bitmap fits in 32 bits.
 */
3157 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3159 MonoClassField *field;
3160 gpointer iter = NULL;
3162 while ((field = mono_class_get_fields (klass, &iter))) {
3165 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) object header; remove it */
3167 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3168 if (mini_type_is_reference (mono_field_get_type (field))) {
3169 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3170 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3172 MonoClass *field_class = mono_class_from_mono_type (field->type);
3173 if (field_class->has_references)
3174 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Picks the
 *   cheapest available form: a dedicated OP_CARD_TABLE_WBARRIER when the
 *   backend supports it, an inline card-table mark (shift + optional mask +
 *   byte store) when possible, or a call to the generic managed write
 *   barrier otherwise. No-op when write barriers are disabled.
 */
3180 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3182 int card_table_shift_bits;
3183 gpointer card_table_mask;
3185 MonoInst *dummy_use;
3186 int nursery_shift_bits;
3187 size_t nursery_size;
3189 if (!cfg->gen_write_barriers)
3192 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3194 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3196 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3199 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3200 wbarrier->sreg1 = ptr->dreg;
3201 wbarrier->sreg2 = value->dreg;
3202 MONO_ADD_INS (cfg->cbb, wbarrier);
3203 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3204 int offset_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked */
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3209 if (card_table_mask)
3210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3212 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3213 * IMM's larger than 32bits.
3215 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3216 card_reg = ins->dreg;
3218 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* mark the card dirty */
3219 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3221 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3222 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive until after the barrier */
3225 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 *   KLASS (dest/src addresses in iargs[0]/iargs[1]). Falls back (by
 *   returning, presumably FALSE on elided lines) when alignment is below
 *   pointer size or the type is too large for the 32-bit wb bitmap; uses
 *   the mono_gc_wbarrier_value_copy_bitmap icall instead of unrolling when
 *   more than 5 pointer-words would be copied. The unrolled path copies
 *   word-by-word, emitting a write barrier for each reference slot, then
 *   copies any sub-word tail with narrower loads/stores.
 */
3229 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3231 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3232 unsigned need_wb = 0;
3237 /*types with references can't have alignment smaller than sizeof(void*) */
3238 if (align < SIZEOF_VOID_P)
3241 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3242 if (size > 32 * SIZEOF_VOID_P)
3245 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3247 /* We don't unroll more than 5 stores to avoid code bloat. */
3248 if (size > 5 * SIZEOF_VOID_P) {
3249 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3250 size += (SIZEOF_VOID_P - 1);
3251 size &= ~(SIZEOF_VOID_P - 1);
3253 EMIT_NEW_ICONST (cfg, iargs [2], size);
3254 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3255 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3259 destreg = iargs [0]->dreg;
3260 srcreg = iargs [1]->dreg;
3263 dest_ptr_reg = alloc_preg (cfg);
3264 tmp_reg = alloc_preg (cfg);
3267 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3269 while (size >= SIZEOF_VOID_P) {
3270 MonoInst *load_inst;
3271 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3272 load_inst->dreg = tmp_reg;
3273 load_inst->inst_basereg = srcreg;
3274 load_inst->inst_offset = offset;
3275 MONO_ADD_INS (cfg->cbb, load_inst);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* NOTE(review): presumably guarded by the need_wb bitmap on an elided line */
3280 emit_write_barrier (cfg, iargs [0], load_inst);
3282 offset += SIZEOF_VOID_P;
3283 size -= SIZEOF_VOID_P;
3286 /*tmp += sizeof (void*)*/
3287 if (size >= SIZEOF_VOID_P) {
3288 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3289 MONO_ADD_INS (cfg->cbb, iargs [0]);
3293 /* Those cannot be references since size < sizeof (void*) */
3295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3302 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3309 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3319 * Emit code to copy a valuetype of type @klass whose address is stored in
3320 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: for gsharedvt types the size/memcpy come from the rgctx at
 * runtime; when write barriers are needed, prefer the intrinsified
 * wb-aware unrolled copy, else call the (mono_value_copy /
 * mono_gsharedvt_value_copy) icall; otherwise small copies are inlined
 * via mini_emit_memcpy and large ones call managed String.memcpy.
 */
3323 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3325 MonoInst *iargs [4];
3328 MonoMethod *memcpy_method;
3329 MonoInst *size_ins = NULL;
3330 MonoInst *memcpy_ins = NULL;
3334 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3337 * This check breaks with spilled vars... need to handle it during verification anyway.
3338 * g_assert (klass && klass == src->klass && klass == dest->klass);
3341 if (mini_is_gsharedvt_klass (klass)) {
3343 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3344 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3348 n = mono_class_native_size (klass, &align);
3350 n = mono_class_value_size (klass, &align);
3352 /* if native is true there should be no references in the struct */
3353 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3354 /* Avoid barriers when storing to the stack */
3355 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3356 (dest->opcode == OP_LDADDR))) {
3362 context_used = mini_class_check_context_used (cfg, klass);
3364 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3365 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3367 } else if (context_used) {
3368 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3370 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3371 if (!cfg->compile_aot)
3372 mono_class_compute_gc_descriptor (klass);
3376 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3378 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* non-barrier path below */
3383 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3384 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3385 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3390 iargs [2] = size_ins;
3392 EMIT_NEW_ICONST (cfg, iargs [2], n);
3394 memcpy_method = get_memcpy_method ();
3396 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3398 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache in a static) the managed String.memset(ptr,val,n)
 *   helper from corlib; aborts if corlib is too old to provide it.
 */
3403 get_memset_method (void)
3405 static MonoMethod *memset_method = NULL;
3406 if (!memset_method) {
3407 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3409 g_error ("Old corlib found. Install a new one");
3411 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of KLASS at DEST->dreg.
 *   gsharedvt types fetch size and a bzero helper from the rgctx and call
 *   it indirectly; otherwise small types use an inline memset and large
 *   ones call managed String.memset(ptr, 0, n).
 */
3415 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3417 MonoInst *iargs [3];
3420 MonoMethod *memset_method;
3421 MonoInst *size_ins = NULL;
3422 MonoInst *bzero_ins = NULL;
3423 static MonoMethod *bzero_method;
3425 /* FIXME: Optimize this for the case when dest is an LDADDR */
3426 mono_class_init (klass);
3427 if (mini_is_gsharedvt_klass (klass)) {
3428 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3429 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* lazily resolved managed bzero helper, cached in the function static */
3431 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3432 g_assert (bzero_method);
3434 iargs [1] = size_ins;
3435 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3439 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3441 n = mono_class_value_size (klass, &align);
3443 if (n <= sizeof (gpointer) * 8) {
3444 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3447 memset_method = get_memset_method ();
3449 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3450 EMIT_NEW_ICONST (cfg, iargs [2], n);
3451 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3458 * Emit IR to return either the this pointer for instance method,
3459 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *   Produce the instruction holding the runtime generic context source for
 *   the current method: 'this' for sharable instance methods, the mrgctx
 *   variable when the method context is used, or the vtable variable for
 *   static/valuetype methods (loading the vtable out of the mrgctx when the
 *   method is itself a generic instantiation). Requires gshared compilation.
 */
3462 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3464 MonoInst *this_ins = NULL;
3466 g_assert (cfg->gshared);
3468 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3469 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3470 !method->klass->valuetype)
3471 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3473 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3474 MonoInst *mrgctx_loc, *mrgctx_var;
3476 g_assert (!this_ins);
3477 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3479 mrgctx_loc = mono_get_vtable_var (cfg);
3480 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3483 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3484 MonoInst *vtable_loc, *vtable_var;
3486 g_assert (!this_ins);
3488 vtable_loc = mono_get_vtable_var (cfg);
3489 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3491 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3492 MonoInst *mrgctx_var = vtable_var;
/* the vtable var actually holds an mrgctx; load the vtable out of it */
3495 vtable_reg = alloc_preg (cfg);
3496 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3497 vtable_var->type = STACK_PTR;
/* instance-method fallback: load the vtable from 'this' */
3505 vtable_reg = alloc_preg (cfg);
3506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) and fill an rgctx-entry descriptor: the
 *   requesting method, whether lookup goes through an mrgctx, and a nested
 *   MonoJumpInfo describing the patched data (class/method/etc.) plus the
 *   kind of information to fetch from the slot.
 */
3511 static MonoJumpInfoRgctxEntry *
3512 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3514 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3515 res->method = method;
3516 res->in_mrgctx = in_mrgctx;
3517 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3518 res->data->type = patch_type;
3519 res->data->data.target = patch_data;
3520 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline lookup of an rgctx slot. One path (AOT/llvm-only,
 *   guard elided) simply calls mono_fill_{method,class}_rgctx with the
 *   slot entry as an AOT constant. The other emits the full fastpath:
 *   walk the chain of rgctx arrays to the slot's depth, null-checking each
 *   link, load the slot, and fall back to the same fill icalls (in the
 *   is_null_bb) when any link or the slot itself is still null.
 */
3525 static inline MonoInst*
3526 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3528 MonoInst *args [16];
3531 // FIXME: No fastpath since the slot is not a compile time constant
3533 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3534 if (entry->in_mrgctx)
3535 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3537 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3541 * FIXME: This can be called during decompose, which is a problem since it creates
3543 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3545 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3547 MonoBasicBlock *is_null_bb, *end_bb;
3548 MonoInst *res, *ins, *call;
3551 slot = mini_get_rgctx_entry_slot (entry);
3553 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3554 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots sit after the fixed MonoMethodRuntimeGenericContext header */
3556 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find how many array hops are needed to reach this index */
3557 for (depth = 0; ; ++depth) {
3558 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3560 if (index < size - 1)
3565 NEW_BBLOCK (cfg, end_bb);
3566 NEW_BBLOCK (cfg, is_null_bb);
3569 rgctx_reg = rgctx->dreg;
3571 rgctx_reg = alloc_preg (cfg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3574 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3575 NEW_BBLOCK (cfg, is_null_bb);
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3581 for (i = 0; i < depth; ++i) {
3582 int array_reg = alloc_preg (cfg);
3584 /* load ptr to next array */
3585 if (mrgctx && i == 0)
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3589 rgctx_reg = array_reg;
3590 /* is the ptr null? */
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3592 /* if yes, jump to actual trampoline */
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3597 val_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3599 /* is the slot null? */
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3601 /* if yes, jump to actual trampoline */
3602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fastpath hit: move the slot value into the result reg */
3605 res_reg = alloc_preg (cfg);
3606 MONO_INST_NEW (cfg, ins, OP_MOVE);
3607 ins->dreg = res_reg;
3608 ins->sreg1 = val_reg;
3609 MONO_ADD_INS (cfg->cbb, ins);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3614 MONO_START_BB (cfg, is_null_bb);
3616 EMIT_NEW_ICONST (cfg, args [1], index);
3618 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3620 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3621 MONO_INST_NEW (cfg, ins, OP_MOVE);
3622 ins->dreg = res_reg;
3623 ins->sreg1 = call->dreg;
3624 MONO_ADD_INS (cfg->cbb, ins);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3627 MONO_START_BB (cfg, end_bb);
3636  * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3639 static inline MonoInst*
3640 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Inline path (condition elided in this dump) ... */
3643 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* ... otherwise go through the lazy-fetch trampoline via an abs call */
3645 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR which loads the data of type RGCTX_TYPE for KLASS from the
 * rgctx: build a CLASS rgctx entry, load the rgctx, and fetch the entry.
 */
3649 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3650 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Same shape as emit_get_rgctx_klass, but for a method signature
 * (MONO_PATCH_INFO_SIGNATURE entry).
 */
3659 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3660 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3662 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3663 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Fetch rgctx data for a gsharedvt call: the (sig, cmethod) pair is
 * packed into a mempool-allocated MonoJumpInfoGSharedVtCall and fetched
 * through a MONO_PATCH_INFO_GSHAREDVT_CALL entry.
 */
3669 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3670 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3672 MonoJumpInfoGSharedVtCall *call_info;
3673 MonoJumpInfoRgctxEntry *entry;
3676 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3677 call_info->sig = sig;
3678 call_info->method = cmethod;
3680 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3681 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3683 return emit_rgctx_fetch (cfg, rgctx, entry);
3687  * emit_get_rgctx_virt_method:
3689  * Return data for method VIRT_METHOD for a receiver of type KLASS.
3692 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3693 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3695 MonoJumpInfoVirtMethod *info;
3696 MonoJumpInfoRgctxEntry *entry;
/* Pack (klass, virt_method) into a mempool-allocated descriptor */
3699 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3700 info->klass = klass;
3701 info->method = virt_method;
3703 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3704 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Fetch the gsharedvt info descriptor for CMETHOD from the rgctx
 * (entry type MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO is hard-coded).
 */
3710 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3711 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3713 MonoJumpInfoRgctxEntry *entry;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
3723  * emit_get_rgctx_method:
3725  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3726  * normal constants, else emit a load from the rgctx.
3729 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3730 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant */
3732 if (!context_used) {
3735 switch (rgctx_type) {
3736 case MONO_RGCTX_INFO_METHOD:
3737 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3739 case MONO_RGCTX_INFO_METHOD_RGCTX:
3740 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not valid in the non-shared case */
3743 g_assert_not_reached ();
/* Shared code: fetch through the rgctx */
3746 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3747 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Fetch the data of type RGCTX_TYPE for FIELD from the rgctx
 * (MONO_PATCH_INFO_FIELD entry).
 */
3754 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3755 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3757 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3758 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3760 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot holding (rgctx_type, data) in the
 * per-method gsharedvt info template table, reusing an existing slot
 * when one matches and growing the table (doubling, starting at 16)
 * when it is full.
 */
3764 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3766 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3767 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing identical entry; LOCAL_OFFSET entries are never shared */
3772 for (i = 0; i < info->num_entries; ++i) {
3773 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3775 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry table when full (mempool alloc, so the old table is just abandoned) */
3779 if (info->num_entries == info->count_entries) {
3780 MonoRuntimeGenericContextInfoTemplate *new_entries;
3781 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3783 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3785 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3786 info->entries = new_entries;
3787 info->count_entries = new_count_entries;
/* Append a new template entry */
3790 idx = info->num_entries;
3791 template_ = &info->entries [idx];
3792 template_->info_type = rgctx_type;
3793 template_->data = data;
3795 info->num_entries ++;
3801  * emit_get_gsharedvt_info:
3803  * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3806 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) a slot, then load it from the method's info var */
3811 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3812 /* Load info->entries [idx] */
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class's byval type */
3820 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3822 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3826  * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) of KLASS if it has
 * not run yet. The vtable is obtained either through the rgctx (shared
 * code) or as a constant; then either a single OP_GENERIC_CLASS_INIT
 * opcode is emitted, or an inline "already initialized?" bit test with a
 * fallback call to the mono_generic_class_init icall.
 */
3829 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3831 MonoInst *vtable_arg;
3834 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx */
3837 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3838 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable is a compile-time constant */
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3844 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3847 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3851 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3852 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3854 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3855 ins->sreg1 = vtable_arg->dreg;
3856 MONO_ADD_INS (cfg->cbb, ins);
/* Inline fastpath: test the 'initialized' bit in the vtable */
3858 static int byte_offset = -1;
3859 static guint8 bitmask;
3860 int bits_reg, inited_reg;
3861 MonoBasicBlock *inited_bb;
3862 MonoInst *args [16];
/* Lazily compute the byte offset/bitmask of MonoVTable.initialized */
3864 if (byte_offset < 0)
3865 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3867 bits_reg = alloc_ireg (cfg);
3868 inited_reg = alloc_ireg (cfg);
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3873 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall */
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
/* Slowpath: run the initializer */
3878 args [0] = vtable_arg;
3879 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3881 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IP when sequence points are enabled
 * and we are compiling METHOD itself (not an inlined callee).
 */
3886 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3890 if (cfg->gen_seq_points && cfg->method == method) {
3891 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3893 ins->flags |= MONO_INST_NONEMPTY_STACK;
3894 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * target classes of a cast into the JIT TLS data (class_cast_from /
 * class_cast_to), so a failing cast can produce a detailed message.
 * If NULL_CHECK is set, a null object skips the recording.
 */
3899 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3901 if (mini_get_debug_options ()->better_cast_details) {
3902 int vtable_reg = alloc_preg (cfg);
3903 int klass_reg = alloc_preg (cfg);
3904 MonoBasicBlock *is_null_bb = NULL;
3906 int to_klass_reg, context_used;
3909 NEW_BBLOCK (cfg, is_null_bb);
/* Skip recording for a null object */
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3915 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platform has no TLS intrinsic: the feature cannot work here */
3917 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3921 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's dynamic class as cast_from */
3922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class as cast_to (via rgctx in shared code) */
3927 context_used = mini_class_check_context_used (cfg, klass);
3929 MonoInst *class_ins;
3931 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3932 to_klass_reg = class_ins->dreg;
3934 to_klass_reg = alloc_preg (cfg);
3935 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3940 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast info in
 * JIT TLS after the cast succeeded.
 */
3945 reset_cast_details (MonoCompile *cfg)
3947 /* Reset the variables holding the cast details */
3948 if (mini_get_debug_options ()->better_cast_details) {
3949 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3951 MONO_ADD_INS (cfg->cbb, tls_get);
3952 /* It is enough to reset the from field */
3953 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3958  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that OBJ's dynamic type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks). The comparison strategy depends on compilation
 * mode: class pointer (shared domains), rgctx vtable (generic sharing),
 * vtable constant (AOT) or immediate vtable pointer (JIT).
 */
3961 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3963 int vtable_reg = alloc_preg (cfg);
3966 context_used = mini_class_check_context_used (cfg, array_class);
3968 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj */
3970 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3972 if (cfg->opt & MONO_OPT_SHARED) {
3973 int class_reg = alloc_preg (cfg);
/* Compare MonoClass pointers (vtables are per-domain under OPT_SHARED) */
3976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3977 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3978 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3979 } else if (context_used) {
3980 MonoInst *vtable_ins;
/* Shared generic code: vtable comes from the rgctx */
3982 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3985 if (cfg->compile_aot) {
/* AOT: vtable as a patchable constant */
3989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3991 vt_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3993 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: vtable pointer as an immediate */
3996 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4002 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4004 reset_cast_details (cfg);
4008  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4009  * generic code is generated.
/* Calls Nullable<T>.Unbox (), either indirectly through an address
 * fetched from the rgctx (shared code) or as a direct method call. */
4012 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4014 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4017 MonoInst *rgctx, *addr;
4019 /* FIXME: What if the class is shared? We might not
4020 have to get the address of the method from the
/* Shared code: get the Unbox method's code address from the rgctx */
4022 addr = emit_get_rgctx_method (cfg, context_used, method,
4023 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4024 if (cfg->llvm_only && cfg->gsharedvt) {
4025 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4027 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4029 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, passing the vtable when required */
4032 gboolean pass_vtable, pass_mrgctx;
4033 MonoInst *rgctx_arg = NULL;
4035 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4036 g_assert (!pass_mrgctx);
4039 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4042 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4045 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on the stack top SP[0] to a value of type
 * KLASS: check the dynamic type (rank 0 + element class match, via the
 * rgctx in shared code), then produce the address of the value, which is
 * the object pointer plus sizeof (MonoObject).
 */
4050 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4054 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4055 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4056 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4057 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4059 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
4060 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4063 /* FIXME: generics */
4064 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type */
4067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4068 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx */
4074 MonoInst *element_class;
4076 /* This assertion is from the unboxcast insn */
4077 g_assert (klass->rank == 0);
4079 element_class = emit_get_rgctx_klass (cfg, context_used,
4080 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4082 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4083 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: inline class check with cast-detail bookkeeping */
4085 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4086 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4087 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the boxed payload */
4090 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4091 MONO_ADD_INS (cfg->cbb, add);
4092 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete kind (value
 * type, reference type, or Nullable<T>) is only known at run time. The
 * emitted IR branches on the class's CLASS_BOX_TYPE info and computes
 * addr_reg, which at the join point holds the address of the unboxed
 * vtype (or of a temporary holding the reference), then loads the value
 * from it.
 */
4099 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4101 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4102 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4106 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check performed by an icall */
4112 args [1] = klass_inst;
4115 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4117 NEW_BBLOCK (cfg, is_ref_bb);
4118 NEW_BBLOCK (cfg, is_nullable_bb);
4119 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type of klass */
4120 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4128 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the value lives right after the MonoObject header */
4132 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4133 MONO_ADD_INS (cfg->cbb, addr);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_ref_bb);
4140 /* Save the ref to a temporary */
4141 dreg = alloc_ireg (cfg);
4142 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4143 addr->dreg = addr_reg;
4144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4148 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time */
4151 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4152 MonoInst *unbox_call;
4153 MonoMethodSignature *unbox_sig;
4155 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4156 unbox_sig->ret = &klass->byval_arg;
4157 unbox_sig->param_count = 1;
4158 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4161 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4163 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4165 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4166 addr->dreg = addr_reg;
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Join: load the unboxed value from addr_reg */
4172 MONO_START_BB (cfg, end_bb);
4175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4181  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an object of type KLASS, optionally for
 * boxing (FOR_BOX). Chooses between: a managed allocator call (fast,
 * when available and size is known), domain-based icalls (OPT_SHARED),
 * an mscorlib token helper (AOT out-of-line), or the per-vtable
 * allocation function. In shared generic code the klass/vtable comes
 * from the rgctx.
 */
4184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4186 MonoInst *iargs [2];
/* --- context_used (shared generic) path --- */
4191 MonoRgctxInfoType rgctx_info;
4192 MonoInst *iargs [2];
/* gsharedvt classes have no compile-time-known instance size */
4193 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4195 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4197 if (cfg->opt & MONO_OPT_SHARED)
4198 rgctx_info = MONO_RGCTX_INFO_KLASS;
4200 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4201 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4203 if (cfg->opt & MONO_OPT_SHARED) {
4204 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4206 alloc_ftn = ves_icall_object_new;
4209 alloc_ftn = ves_icall_object_new_specific;
4212 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4213 if (known_instance_size) {
4214 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header */
4215 if (size < sizeof (MonoObject))
4216 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4218 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4220 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4223 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4226 if (cfg->opt & MONO_OPT_SHARED) {
4227 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4228 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4230 alloc_ftn = ves_icall_object_new;
4231 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4232 /* This happens often in argument checking code, eg. throw new FooException... */
4233 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4234 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4235 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4237 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4238 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> report a type load error on the cfg */
4242 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4243 cfg->exception_ptr = klass;
4247 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4249 if (managed_alloc) {
4250 int size = mono_class_instance_size (klass);
4251 if (size < sizeof (MonoObject))
4252 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4254 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4255 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4256 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4258 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the length in words (lw) instead of the vtable alone */
4260 guint32 lw = vtable->klass->instance_size;
4261 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4262 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4263 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4266 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4270 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4274  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL of type KLASS. Nullable types go through
 * Nullable<T>.Box (); gsharedvt types branch at run time on the class's
 * box type (vtype / ref / nullable); plain value types allocate an
 * object and store the value after the MonoObject header.
 */
4277 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4279 MonoInst *alloc, *ins;
4281 if (mono_class_is_nullable (klass)) {
4282 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4285 if (cfg->llvm_only && cfg->gsharedvt) {
4286 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4287 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4288 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4290 /* FIXME: What if the class is shared? We might not
4291 have to get the method address from the RGCTX. */
4292 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4293 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4294 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4296 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: direct call to Box, passing the vtable if needed */
4299 gboolean pass_vtable, pass_mrgctx;
4300 MonoInst *rgctx_arg = NULL;
4302 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4303 g_assert (!pass_mrgctx);
4306 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4309 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4312 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4316 if (mini_is_gsharedvt_klass (klass)) {
4317 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4318 MonoInst *res, *is_ref, *src_var, *addr;
4321 dreg = alloc_ireg (cfg);
4323 NEW_BBLOCK (cfg, is_ref_bb);
4324 NEW_BBLOCK (cfg, is_nullable_bb);
4325 NEW_BBLOCK (cfg, end_bb);
/* Runtime three-way dispatch on the box type, mirroring handle_unbox_gsharedvt */
4326 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the value into the object payload */
4334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4338 ins->opcode = OP_STOREV_MEMBASE;
4340 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4341 res->type = STACK_OBJ;
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4346 MONO_START_BB (cfg, is_ref_bb);
4348 /* val is a vtype, so has to load the value manually */
4349 src_var = get_vreg_to_inst (cfg, val->dreg);
4351 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4352 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4357 MONO_START_BB (cfg, is_nullable_bb);
4360 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4361 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4363 MonoMethodSignature *box_sig;
4366 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4367 * construct that method at JIT time, so have to do things by hand.
4369 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4370 box_sig->ret = &mono_defaults.object_class->byval_arg;
4371 box_sig->param_count = 1;
4372 box_sig->params [0] = &klass->byval_arg;
4375 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4377 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4378 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4379 res->type = STACK_OBJ;
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4385 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate, then store val after the object header */
4389 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an
 * open generic) with at least one variant/covariant type parameter that
 * is instantiated with a reference type — the case where casts need the
 * slow variance-aware path.
 */
4399 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4402 MonoGenericContainer *container;
4403 MonoGenericInst *ginst;
4405 if (klass->generic_class) {
4406 container = klass->generic_class->container_class->generic_container;
4407 ginst = klass->generic_class->context.class_inst;
4408 } else if (klass->generic_container && context_used) {
4409 container = klass->generic_container;
4410 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types */
4415 for (i = 0; i < container->type_argc; ++i) {
4417 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4419 type = ginst->type_argv [i];
4420 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls are safe to
 * call directly (see icall_is_direct_callable below). Written once under
 * a memory barrier, then read without locking. */
4426 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly instead of
 * through a wrapper. Only whitelisted corlib types (and Math) qualify,
 * and only when direct icalls are enabled for this cfg.
 */
4429 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4431 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4432 if (!direct_icalls_enabled (cfg))
4436 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4437 * Whitelist a few icalls for now.
4439 if (!direct_icall_type_hash) {
4440 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4442 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4443 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4444 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4445 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before making the pointer visible */
4446 mono_memory_barrier ();
4447 direct_icall_type_hash = h;
4450 if (cmethod->klass == mono_defaults.math_class)
4452 /* No locking needed */
4453 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* A klass needs the slow (cached) isinst/castclass path when it is an
 * interface, an array, nullable, marshal-by-ref, sealed, or a generic
 * type variable — i.e. anything the simple vtable compare can't handle. */
4458 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper, recording
 * cast details around the call for --debug=casts.
 */
4461 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4463 MonoMethod *mono_castclass;
4466 mono_castclass = mono_marshal_get_castclass_with_cache ();
4468 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4469 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4470 reset_cast_details (cfg);
4476 get_castclass_cache_idx (MonoCompile *cfg)
4478 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
/* Combine the method index (high 16 bits) with a per-cfg counter */
4479 cfg->castclass_cache_index ++;
4480 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant: klass is a compile-time constant and the
 * cache slot is identified by a per-call-site runtime constant.
 */
4484 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4493 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4496 idx = get_castclass_cache_idx (cfg);
4497 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4499 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4500 return emit_castclass_with_cache (cfg, klass, args);
4504  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode: cast SRC to KLASS or throw
 * InvalidCastException. Strategy depends on the klass kind: the cached
 * wrapper for variant generics, an inlined marshal wrapper for
 * interfaces/MBR in non-shared code, or inline vtable/klass compares
 * (with a null-object fast exit) in the general case.
 */
4507 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4509 MonoBasicBlock *is_null_bb;
4510 int obj_reg = src->dreg;
4511 int vtable_reg = alloc_preg (cfg);
4513 MonoInst *klass_inst = NULL, *res;
4515 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic argument: must use the cached wrapper */
4517 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4518 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4519 (*inline_costs) += 2;
/* Interface / MarshalByRef in non-shared code: inline the marshal wrapper */
4521 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4522 MonoMethod *mono_castclass;
4523 MonoInst *iargs [1];
4526 mono_castclass = mono_marshal_get_castclass (klass);
4529 save_cast_details (cfg, klass, src->dreg, TRUE);
4530 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4531 iargs, ip, cfg->real_offset, TRUE);
4532 reset_cast_details (cfg);
4533 CHECK_CFG_EXCEPTION;
4534 g_assert (costs > 0);
4536 cfg->real_offset += 5;
4538 (*inline_costs) += costs;
/* Shared code: complex klasses go through the cache-based wrapper,
 * with the cache entry and klass fetched from the rgctx */
4546 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4547 MonoInst *cache_ins;
4549 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4554 /* klass - it's the second element of the cache entry*/
4555 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4558 args [2] = cache_ins;
4560 return emit_castclass_with_cache (cfg, klass, args);
4563 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass a castclass */
4566 NEW_BBLOCK (cfg, is_null_bb);
4568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4571 save_cast_details (cfg, klass, obj_reg, FALSE);
4573 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4577 int klass_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array klass: an exact vtable/klass compare is sufficient */
4581 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4582 /* the remoting code is broken, access the class for now */
4583 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4584 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4587 cfg->exception_ptr = klass;
4590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4595 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4598 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4602 MONO_START_BB (cfg, is_null_bb);
4604 reset_cast_details (cfg);
4613 * Returns NULL and set the cfg exception on error.
4616 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4619 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4620 int obj_reg = src->dreg;
4621 int vtable_reg = alloc_preg (cfg);
4622 int res_reg = alloc_ireg_ref (cfg);
4623 MonoInst *klass_inst = NULL;
4628 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4629 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4630 MonoInst *cache_ins;
4632 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4637 /* klass - it's the second element of the cache entry*/
4638 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins;
4643 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4646 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4649 NEW_BBLOCK (cfg, is_null_bb);
4650 NEW_BBLOCK (cfg, false_bb);
4651 NEW_BBLOCK (cfg, end_bb);
4653 /* Do the assignment at the beginning, so the other assignment can be if converted */
4654 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4655 ins->type = STACK_OBJ;
4658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4664 g_assert (!context_used);
4665 /* the is_null_bb target simply copies the input register to the output */
4666 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4668 int klass_reg = alloc_preg (cfg);
4671 int rank_reg = alloc_preg (cfg);
4672 int eclass_reg = alloc_preg (cfg);
4674 g_assert (!context_used);
4675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4680 if (klass->cast_class == mono_defaults.object_class) {
4681 int parent_reg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4683 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4684 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4686 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4688 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4690 } else if (klass->cast_class == mono_defaults.enum_class) {
4691 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4696 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4697 /* Check that the object is a vector too */
4698 int bounds_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4704 /* the is_null_bb target simply copies the input register to the output */
4705 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4707 } else if (mono_class_is_nullable (klass)) {
4708 g_assert (!context_used);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4710 /* the is_null_bb target simply copies the input register to the output */
4711 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4713 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4714 g_assert (!context_used);
4715 /* the remoting code is broken, access the class for now */
4716 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4717 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4719 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4720 cfg->exception_ptr = klass;
4723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4732 /* the is_null_bb target simply copies the input register to the output */
4733 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4738 MONO_START_BB (cfg, false_bb);
4740 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4743 MONO_START_BB (cfg, is_null_bb);
4745 MONO_START_BB (cfg, end_bb);
4751 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4753 /* This opcode takes as input an object reference and a class, and returns:
4754 0) if the object is an instance of the class,
4755 1) if the object is not instance of the class,
4756 2) if the object is a proxy whose type cannot be determined */
4759 #ifndef DISABLE_REMOTING
4760 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4762 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4764 int obj_reg = src->dreg;
4765 int dreg = alloc_ireg (cfg);
4767 #ifndef DISABLE_REMOTING
4768 int klass_reg = alloc_preg (cfg);
4771 NEW_BBLOCK (cfg, true_bb);
4772 NEW_BBLOCK (cfg, false_bb);
4773 NEW_BBLOCK (cfg, end_bb);
4774 #ifndef DISABLE_REMOTING
4775 NEW_BBLOCK (cfg, false2_bb);
4776 NEW_BBLOCK (cfg, no_proxy_bb);
4779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4782 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4783 #ifndef DISABLE_REMOTING
4784 NEW_BBLOCK (cfg, interface_fail_bb);
4787 tmp_reg = alloc_preg (cfg);
4788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4789 #ifndef DISABLE_REMOTING
4790 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4791 MONO_START_BB (cfg, interface_fail_bb);
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4794 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4796 tmp_reg = alloc_preg (cfg);
4797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4801 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4804 #ifndef DISABLE_REMOTING
4805 tmp_reg = alloc_preg (cfg);
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4809 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4810 tmp_reg = alloc_preg (cfg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4819 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4820 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4822 MONO_START_BB (cfg, no_proxy_bb);
4824 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4826 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4830 MONO_START_BB (cfg, false_bb);
4832 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4835 #ifndef DISABLE_REMOTING
4836 MONO_START_BB (cfg, false2_bb);
4838 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4839 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4842 MONO_START_BB (cfg, true_bb);
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4846 MONO_START_BB (cfg, end_bb);
4849 MONO_INST_NEW (cfg, ins, OP_ICONST);
4851 ins->type = STACK_I4;
4857 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4859 /* This opcode takes as input an object reference and a class, and returns:
4860 0) if the object is an instance of the class,
4861 1) if the object is a proxy whose type cannot be determined
4862 an InvalidCastException exception is thrown otherwhise*/
4865 #ifndef DISABLE_REMOTING
4866 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4868 MonoBasicBlock *ok_result_bb;
4870 int obj_reg = src->dreg;
4871 int dreg = alloc_ireg (cfg);
4872 int tmp_reg = alloc_preg (cfg);
4874 #ifndef DISABLE_REMOTING
4875 int klass_reg = alloc_preg (cfg);
4876 NEW_BBLOCK (cfg, end_bb);
4879 NEW_BBLOCK (cfg, ok_result_bb);
4881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4882 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4884 save_cast_details (cfg, klass, obj_reg, FALSE);
4886 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4887 #ifndef DISABLE_REMOTING
4888 NEW_BBLOCK (cfg, interface_fail_bb);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4891 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4892 MONO_START_BB (cfg, interface_fail_bb);
4893 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4895 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4897 tmp_reg = alloc_preg (cfg);
4898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4900 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4902 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4903 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4905 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4906 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4910 #ifndef DISABLE_REMOTING
4911 NEW_BBLOCK (cfg, no_proxy_bb);
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4915 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4917 tmp_reg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4921 tmp_reg = alloc_preg (cfg);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4926 NEW_BBLOCK (cfg, fail_1_bb);
4928 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4930 MONO_START_BB (cfg, fail_1_bb);
4932 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4935 MONO_START_BB (cfg, no_proxy_bb);
4937 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4939 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4943 MONO_START_BB (cfg, ok_result_bb);
4945 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4947 #ifndef DISABLE_REMOTING
4948 MONO_START_BB (cfg, end_bb);
4952 MONO_INST_NEW (cfg, ins, OP_ICONST);
4954 ins->type = STACK_I4;
4959 static G_GNUC_UNUSED MonoInst*
4960 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4962 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4963 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4966 switch (enum_type->type) {
4969 #if SIZEOF_REGISTER == 8
4981 MonoInst *load, *and_, *cmp, *ceq;
4982 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4984 int dest_reg = alloc_ireg (cfg);
4986 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4987 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4988 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4989 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4991 ceq->type = STACK_I4;
4994 load = mono_decompose_opcode (cfg, load);
4995 and_ = mono_decompose_opcode (cfg, and_);
4996 cmp = mono_decompose_opcode (cfg, cmp);
4997 ceq = mono_decompose_opcode (cfg, ceq);
5005 * Returns NULL and set the cfg exception on error.
5007 static G_GNUC_UNUSED MonoInst*
5008 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5012 gpointer trampoline;
5013 MonoInst *obj, *method_ins, *tramp_ins;
5017 if (virtual_ && !cfg->llvm_only) {
5018 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5021 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5025 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5029 /* Inline the contents of mono_delegate_ctor */
5031 /* Set target field */
5032 /* Optimize away setting of NULL target */
5033 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
5035 if (cfg->gen_write_barriers) {
5036 dreg = alloc_preg (cfg);
5037 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5038 emit_write_barrier (cfg, ptr, target);
5042 /* Set method field */
5043 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5047 * To avoid looking up the compiled code belonging to the target method
5048 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5049 * store it, and we fill it after the method has been compiled.
5051 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5052 MonoInst *code_slot_ins;
5055 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
5057 domain = mono_domain_get ();
5058 mono_domain_lock (domain);
5059 if (!domain_jit_info (domain)->method_code_hash)
5060 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5061 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5063 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5064 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5066 mono_domain_unlock (domain);
5068 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
5073 if (cfg->llvm_only) {
5074 MonoInst *args [16];
5079 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5080 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5083 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
5089 if (cfg->compile_aot) {
5090 MonoDelegateClassMethodPair *del_tramp;
5092 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5093 del_tramp->klass = klass;
5094 del_tramp->method = context_used ? NULL : method;
5095 del_tramp->is_virtual = virtual_;
5096 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5099 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5101 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5102 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5105 /* Set invoke_impl field */
5107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5109 dreg = alloc_preg (cfg);
5110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5113 dreg = alloc_preg (cfg);
5114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5118 dreg = alloc_preg (cfg);
5119 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5120 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5122 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
5128 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5130 MonoJitICallInfo *info;
5132 /* Need to register the icall so it gets an icall wrapper */
5133 info = mono_get_array_new_va_icall (rank);
5135 cfg->flags |= MONO_CFG_HAS_VARARGS;
5137 /* mono_array_new_va () needs a vararg calling convention */
5138 cfg->exception_message = g_strdup ("array-new");
5139 cfg->disable_llvm = TRUE;
5141 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5142 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5146 * handle_constrained_gsharedvt_call:
5148 * Handle constrained calls where the receiver is a gsharedvt type.
5149 * Return the instruction representing the call. Set the cfg exception on failure.
5152 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5153 gboolean *ref_emit_widen)
5155 MonoInst *ins = NULL;
5156 gboolean emit_widen = *ref_emit_widen;
5159 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5160 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5161 * pack the arguments into an array, and do the rest of the work in in an icall.
5163 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5164 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5165 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5166 MonoInst *args [16];
5169 * This case handles calls to
5170 * - object:ToString()/Equals()/GetHashCode(),
5171 * - System.IComparable<T>:CompareTo()
5172 * - System.IEquatable<T>:Equals ()
5173 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
5177 if (mono_method_check_context_used (cmethod))
5178 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5180 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5181 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5183 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5184 if (fsig->hasthis && fsig->param_count) {
5185 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5186 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5187 ins->dreg = alloc_preg (cfg);
5188 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5189 MONO_ADD_INS (cfg->cbb, ins);
5192 if (mini_is_gsharedvt_type (fsig->params [0])) {
5193 int addr_reg, deref_arg_reg;
5195 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5196 deref_arg_reg = alloc_preg (cfg);
5197 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5198 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5200 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5201 addr_reg = ins->dreg;
5202 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5204 EMIT_NEW_ICONST (cfg, args [3], 0);
5205 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5208 EMIT_NEW_ICONST (cfg, args [3], 0);
5209 EMIT_NEW_ICONST (cfg, args [4], 0);
5211 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
5214 if (mini_is_gsharedvt_type (fsig->ret)) {
5215 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5216 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5221 MONO_ADD_INS (cfg->cbb, add);
5223 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5224 MONO_ADD_INS (cfg->cbb, ins);
5225 /* ins represents the call result */
5228 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5231 *ref_emit_widen = emit_widen;
5240 mono_emit_load_got_addr (MonoCompile *cfg)
5242 MonoInst *getaddr, *dummy_use;
5244 if (!cfg->got_var || cfg->got_var_allocated)
5247 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5248 getaddr->cil_code = cfg->header->code;
5249 getaddr->dreg = cfg->got_var->dreg;
5251 /* Add it to the start of the first bblock */
5252 if (cfg->bb_entry->code) {
5253 getaddr->next = cfg->bb_entry->code;
5254 cfg->bb_entry->code = getaddr;
5257 MONO_ADD_INS (cfg->bb_entry, getaddr);
5259 cfg->got_var_allocated = TRUE;
5262 * Add a dummy use to keep the got_var alive, since real uses might
5263 * only be generated by the back ends.
5264 * Add it to end_bblock, so the variable's lifetime covers the whole
5266 * It would be better to make the usage of the got var explicit in all
5267 * cases when the backend needs it (i.e. calls, throw etc.), so this
5268 * wouldn't be needed.
5270 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5271 MONO_ADD_INS (cfg->bb_exit, dummy_use);
5274 static int inline_limit;
5275 static gboolean inline_limit_inited;
5278 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5280 MonoMethodHeaderSummary header;
5282 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5283 MonoMethodSignature *sig = mono_method_signature (method);
5287 if (cfg->disable_inline)
5292 if (cfg->inline_depth > 10)
5295 if (!mono_method_get_header_summary (method, &header))
5298 /*runtime, icall and pinvoke are checked by summary call*/
5299 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5300 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5301 (mono_class_is_marshalbyref (method->klass)) ||
5305 /* also consider num_locals? */
5306 /* Do the size check early to avoid creating vtables */
5307 if (!inline_limit_inited) {
5308 if (g_getenv ("MONO_INLINELIMIT"))
5309 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5311 inline_limit = INLINE_LENGTH_LIMIT;
5312 inline_limit_inited = TRUE;
5314 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5318 * if we can initialize the class of the method right away, we do,
5319 * otherwise we don't allow inlining if the class needs initialization,
5320 * since it would mean inserting a call to mono_runtime_class_init()
5321 * inside the inlined code
5323 if (!(cfg->opt & MONO_OPT_SHARED)) {
5324 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5325 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5326 vtable = mono_class_vtable (cfg->domain, method->klass);
5329 if (!cfg->compile_aot) {
5331 if (!mono_runtime_class_init_full (vtable, &error))
5332 mono_error_raise_exception (&error); /* FIXME don't raise here */
5334 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5335 if (cfg->run_cctors && method->klass->has_cctor) {
5336 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5337 if (!method->klass->runtime_info)
5338 /* No vtable created yet */
5340 vtable = mono_class_vtable (cfg->domain, method->klass);
5343 /* This makes so that inline cannot trigger */
5344 /* .cctors: too many apps depend on them */
5345 /* running with a specific order... */
5346 if (! vtable->initialized)
5349 if (!mono_runtime_class_init_full (vtable, &error))
5350 mono_error_raise_exception (&error); /* FIXME don't raise here */
5352 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5353 if (!method->klass->runtime_info)
5354 /* No vtable created yet */
5356 vtable = mono_class_vtable (cfg->domain, method->klass);
5359 if (!vtable->initialized)
5364 * If we're compiling for shared code
5365 * the cctor will need to be run at aot method load time, for example,
5366 * or at the end of the compilation of the inlining method.
5368 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5372 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5373 if (mono_arch_is_soft_float ()) {
5375 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5377 for (i = 0; i < sig->param_count; ++i)
5378 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5383 if (g_list_find (cfg->dont_inline, method))
5390 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5392 if (!cfg->compile_aot) {
5394 if (vtable->initialized)
5398 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5399 if (cfg->method == method)
5403 if (!mono_class_needs_cctor_run (klass, method))
5406 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5407 /* The initialization is already done before the method is called */
5414 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5418 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5421 if (mini_is_gsharedvt_variable_klass (klass)) {
5424 mono_class_init (klass);
5425 size = mono_class_array_element_size (klass);
5428 mult_reg = alloc_preg (cfg);
5429 array_reg = arr->dreg;
5430 index_reg = index->dreg;
5432 #if SIZEOF_REGISTER == 8
5433 /* The array reg is 64 bits but the index reg is only 32 */
5434 if (COMPILE_LLVM (cfg)) {
5436 index2_reg = index_reg;
5438 index2_reg = alloc_preg (cfg);
5439 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5442 if (index->type == STACK_I8) {
5443 index2_reg = alloc_preg (cfg);
5444 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5446 index2_reg = index_reg;
5451 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5453 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5454 if (size == 1 || size == 2 || size == 4 || size == 8) {
5455 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5457 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5458 ins->klass = mono_class_get_element_class (klass);
5459 ins->type = STACK_MP;
5465 add_reg = alloc_ireg_mp (cfg);
5468 MonoInst *rgctx_ins;
5471 g_assert (cfg->gshared);
5472 context_used = mini_class_check_context_used (cfg, klass);
5473 g_assert (context_used);
5474 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5475 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5479 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5480 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5481 ins->klass = mono_class_get_element_class (klass);
5482 ins->type = STACK_MP;
5483 MONO_ADD_INS (cfg->cbb, ins);
5489 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5491 int bounds_reg = alloc_preg (cfg);
5492 int add_reg = alloc_ireg_mp (cfg);
5493 int mult_reg = alloc_preg (cfg);
5494 int mult2_reg = alloc_preg (cfg);
5495 int low1_reg = alloc_preg (cfg);
5496 int low2_reg = alloc_preg (cfg);
5497 int high1_reg = alloc_preg (cfg);
5498 int high2_reg = alloc_preg (cfg);
5499 int realidx1_reg = alloc_preg (cfg);
5500 int realidx2_reg = alloc_preg (cfg);
5501 int sum_reg = alloc_preg (cfg);
5502 int index1, index2, tmpreg;
5506 mono_class_init (klass);
5507 size = mono_class_array_element_size (klass);
5509 index1 = index_ins1->dreg;
5510 index2 = index_ins2->dreg;
5512 #if SIZEOF_REGISTER == 8
5513 /* The array reg is 64 bits but the index reg is only 32 */
5514 if (COMPILE_LLVM (cfg)) {
5517 tmpreg = alloc_preg (cfg);
5518 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5520 tmpreg = alloc_preg (cfg);
5521 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5525 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5529 /* range checking */
5530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5531 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5534 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5535 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5537 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5539 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5542 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5543 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5545 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5547 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5549 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5550 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5552 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5553 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5555 ins->type = STACK_MP;
5557 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of the array element accessed by the
 * Get/Set/Address accessor method CMETHOD. SP holds the array reference
 * followed by one index per array rank; for setters the trailing argument
 * is the stored value, which is excluded from the rank computation below.
 */
5563 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5567 MonoMethod *addr_method;
5569 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter, the last parameter is the value to store, not an index. */
5571 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Single-dimension fast path (last arg mirrors the safety_checks flag used in emit_array_store). */
5574 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5576 /* emit_ldelema_2 depends on OP_LMUL */
5577 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5578 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt element types have variable size, so the inline paths do not apply. */
5581 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the Address wrapper specialized on (rank, element_size). */
5584 element_size = mono_class_array_element_size (eclass);
5585 addr_method = mono_marshal_get_array_address (rank, element_size);
5586 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break IL instruction / Debugger.Break () call. */
5591 static MonoBreakPolicy
5592 always_insert_breakpoint (MonoMethod *method)
5594 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break-policy callback; replaced via mono_set_break_policy (). */
5597 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5600 * mono_set_break_policy:
5601 * policy_callback: the new callback function
5603 * Allow embedders to decide whether to actually obey breakpoint instructions
5604 * (both break IL instructions and Debugger.Break () method calls), for example
5605 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5606 * untrusted or semi-trusted code.
5608 * @policy_callback will be called every time a break point instruction needs to
5609 * be inserted with the method argument being the method that calls Debugger.Break()
5610 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5611 * if it wants the breakpoint to not be effective in the given method.
5612 * #MONO_BREAK_POLICY_ALWAYS is the default.
5615 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5617 if (policy_callback)
5618 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy. */
5620 break_policy_func = always_insert_breakpoint;
/*
 * Ask the registered break policy whether a breakpoint should actually be
 * emitted for METHOD. NOTE(review): the identifier misspells "breakpoint";
 * kept as-is since callers reference this exact name.
 */
5624 should_insert_brekpoint (MonoMethod *method) {
5625 switch (break_policy_func (method)) {
5626 case MONO_BREAK_POLICY_ALWAYS:
5628 case MONO_BREAK_POLICY_NEVER:
5630 case MONO_BREAK_POLICY_ON_DBG:
5631 g_warning ("mdb no longer supported");
5634 g_warning ("Incorrect value returned from break policy callback");
5639 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * Emit inlined IR for the GetGenericValueImpl/SetGenericValueImpl icalls.
 * args = { array, index, pointer-to-value }; the element type comes from
 * fsig->params [2].
 */
5641 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5643 MonoInst *addr, *store, *load;
5644 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5646 /* the bounds check is already done by the callers */
5647 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the array slot, with a write barrier for references. */
5649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5650 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5651 if (mini_type_is_reference (fsig->params [2]))
5652 emit_write_barrier (cfg, addr, load);
/* Get: copy the array slot into *args [2]. */
5654 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5655 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is (or instantiates to) a reference type; used below to decide
 * whether stores need covariance checks and write barriers. */
5662 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5664 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a stelem-style store of SP [2] into array SP [0] at index
 * SP [1]. When SAFETY_CHECKS is set and the element is a reference type, the
 * store goes through the virtual stelemref helper (which validates the stored
 * element's type); storing a null constant skips that path since null always
 * conforms.
 */
5668 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5670 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5671 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5672 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5673 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5674 MonoInst *iargs [3];
/* The helper is dispatched virtually, so its vtable slot must be set up. */
5677 mono_class_setup_vtable (obj_array);
5678 g_assert (helper->slot);
5680 if (sp [0]->type != STACK_OBJ)
5682 if (sp [2]->type != STACK_OBJ)
5689 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5693 if (mini_is_gsharedvt_variable_klass (klass)) {
/* Variable-size element: address it, then force a value-type store. */
5696 // FIXME-VT: OP_ICONST optimization
5697 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5698 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5699 ins->opcode = OP_STOREV_MEMBASE;
5700 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset and bounds-check directly. */
5701 int array_reg = sp [0]->dreg;
5702 int index_reg = sp [1]->dreg;
5703 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64 bit: the index vreg is only 32 bits wide, zero-extend it. */
5705 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5706 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5709 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5710 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, barrier reference stores. */
5712 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5714 if (generic_class_is_reference_type (cfg, klass))
5715 emit_write_barrier (cfg, addr, sp [2]);
/*
 * Inline IR for Array.UnsafeStore/UnsafeLoad: an array access with the
 * safety checks disabled. The element type comes from the signature —
 * params [2] for stores, the return type for loads.
 */
5722 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5727 eklass = mono_class_from_mono_type (fsig->params [2]);
5729 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path: safety_checks == FALSE skips the checked stelemref helper. */
5732 return emit_array_store (cfg, eklass, args, FALSE);
/* Load path: address the element (no bounds check) and load it typed. */
5734 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5735 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic can be implemented as a
 * raw register move from PARAM_KLASS to RETURN_KLASS. Rejects combinations
 * which are not bit-compatible: mixed reference/value kinds, types holding GC
 * references, struct/scalar mixes, floating point, and size mismatches that
 * do not fall in the same 32-bit register class.
 *
 *   Fix: several `&param_klass` tokens had been corrupted by a bad encoding
 * round-trip into `¶m_klass` (¶ is `&p` mis-decoded); restored them.
 */
5741 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5744 int param_size, return_size;
/* Strip enum wrappers etc. so the size/kind checks see the underlying types. */
5746 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5747 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5749 if (cfg->verbose_level > 3)
5750 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5752 //Don't allow mixing reference types with value types
5753 if (param_klass->valuetype != return_klass->valuetype) {
5754 if (cfg->verbose_level > 3)
5755 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5759 if (!param_klass->valuetype) {
5760 if (cfg->verbose_level > 3)
5761 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types holding GC references cannot be moved as raw bits. */
5766 if (param_klass->has_references || return_klass->has_references)
5769 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5770 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5771 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5772 if (cfg->verbose_level > 3)
5773 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floating point values live in a separate register bank. */
5777 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5778 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5779 if (cfg->verbose_level > 3)
5780 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5784 param_size = mono_class_value_size (param_klass, &align);
5785 return_size = mono_class_value_size (return_klass, &align);
5787 //We can do it if sizes match
5788 if (param_size == return_size) {
5789 if (cfg->verbose_level > 3)
5790 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5794 //No simple way to handle struct if sizes don't match
5795 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5796 if (cfg->verbose_level > 3)
5797 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5802 * Same reg size category.
5803 * A quick note on why we don't require widening here.
5804 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5806 * Since the source value comes from a function argument, the JIT will already have
5807 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5809 if (param_size <= 4 && return_size <= 4) {
5810 if (cfg->verbose_level > 3)
5811 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * Inline IR for Array.UnsafeMov<S,R>: reinterpret the argument as the return
 * type without a runtime conversion, when is_unsafe_mov_compatible allows it.
 */
5819 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5821 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5822 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* A gsharedvt return type has no fixed size, so a raw move is impossible. */
5824 if (mini_is_gsharedvt_variable_type (fsig->ret))
5827 //Valuetypes that are semantically equivalent or numbers than can be widened to
5828 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5831 //Arrays of valuetypes that are semantically equivalent
5832 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * Try to replace a constructor call with intrinsic IR: SIMD ctors (when the
 * arch supports them and MONO_OPT_SIMD is on), then native-type intrinsics.
 * Returns NULL when no intrinsic applies.
 */
5839 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5841 #ifdef MONO_ARCH_SIMD_INTRINSICS
5842 MonoInst *ins = NULL;
5844 if (cfg->opt & MONO_OPT_SIMD) {
5845 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5851 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER instruction with the given MONO_MEMORY_BARRIER_* kind. */
5855 emit_memory_barrier (MonoCompile *cfg, int kind)
5857 MonoInst *ins = NULL;
5858 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5859 MONO_ADD_INS (cfg->cbb, ins);
5860 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansions supported by the LLVM backend: System.Math
 * Sin/Cos/Sqrt/Abs mapped to unary float opcodes, and integer Min/Max mapped
 * to min/max opcodes when MONO_OPT_CMOV is enabled. Returns NULL when the
 * method is not recognized.
 */
5866 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5868 MonoInst *ins = NULL;
5871 /* The LLVM backend supports these intrinsics */
5872 if (cmethod->klass == mono_defaults.math_class) {
5873 if (strcmp (cmethod->name, "Sin") == 0) {
5875 } else if (strcmp (cmethod->name, "Cos") == 0) {
5877 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Only the double overload of Abs is intrinsified here. */
5879 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one R8 argument in, R8 result out. */
5883 if (opcode && fsig->param_count == 1) {
5884 MONO_INST_NEW (cfg, ins, opcode);
5885 ins->type = STACK_R8;
5886 ins->dreg = mono_alloc_freg (cfg);
5887 ins->sreg1 = args [0]->dreg;
5888 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max expand to min/max opcodes when the CMOV opt is enabled. */
5892 if (cfg->opt & MONO_OPT_CMOV) {
5893 if (strcmp (cmethod->name, "Min") == 0) {
5894 if (fsig->params [0]->type == MONO_TYPE_I4)
5896 if (fsig->params [0]->type == MONO_TYPE_U4)
5897 opcode = OP_IMIN_UN;
5898 else if (fsig->params [0]->type == MONO_TYPE_I8)
5900 else if (fsig->params [0]->type == MONO_TYPE_U8)
5901 opcode = OP_LMIN_UN;
5902 } else if (strcmp (cmethod->name, "Max") == 0) {
5903 if (fsig->params [0]->type == MONO_TYPE_I4)
5905 if (fsig->params [0]->type == MONO_TYPE_U4)
5906 opcode = OP_IMAX_UN;
5907 else if (fsig->params [0]->type == MONO_TYPE_I8)
5909 else if (fsig->params [0]->type == MONO_TYPE_U8)
5910 opcode = OP_LMAX_UN;
/* Binary min/max: the stack type follows the first parameter's width. */
5914 if (opcode && fsig->param_count == 2) {
5915 MONO_INST_NEW (cfg, ins, opcode);
5916 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5917 ins->dreg = mono_alloc_ireg (cfg);
5918 ins->sreg1 = args [0]->dreg;
5919 ins->sreg2 = args [1]->dreg;
5920 MONO_ADD_INS (cfg->cbb, ins);
/*
 * Intrinsic expansions usable for methods compiled with generic sharing
 * (per the function name); currently the Array.Unsafe* helpers. Returns
 * NULL when the method is not recognized.
 */
5928 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5930 if (cmethod->klass == mono_defaults.array_class) {
5931 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5932 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5933 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5934 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5935 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5936 return emit_array_unsafe_mov (cfg, fsig, args);
5943 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5945 MonoInst *ins = NULL;
5947 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5949 if (cmethod->klass == mono_defaults.string_class) {
5950 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5951 int dreg = alloc_ireg (cfg);
5952 int index_reg = alloc_preg (cfg);
5953 int add_reg = alloc_preg (cfg);
5955 #if SIZEOF_REGISTER == 8
5956 if (COMPILE_LLVM (cfg)) {
5957 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5959 /* The array reg is 64 bits but the index reg is only 32 */
5960 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5963 index_reg = args [1]->dreg;
5965 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5967 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5968 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5969 add_reg = ins->dreg;
5970 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5973 int mult_reg = alloc_preg (cfg);
5974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5975 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5976 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5977 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5979 type_from_op (cfg, ins, NULL, NULL);
5981 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5982 int dreg = alloc_ireg (cfg);
5983 /* Decompose later to allow more optimizations */
5984 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5985 ins->type = STACK_I4;
5986 ins->flags |= MONO_INST_FAULT;
5987 cfg->cbb->has_array_access = TRUE;
5988 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5993 } else if (cmethod->klass == mono_defaults.object_class) {
5994 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5995 int dreg = alloc_ireg_ref (cfg);
5996 int vt_reg = alloc_preg (cfg);
5997 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5998 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5999 type_from_op (cfg, ins, NULL, NULL);
6002 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6003 int dreg = alloc_ireg (cfg);
6004 int t1 = alloc_ireg (cfg);
6006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6007 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6008 ins->type = STACK_I4;
6011 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6012 MONO_INST_NEW (cfg, ins, OP_NOP);
6013 MONO_ADD_INS (cfg->cbb, ins);
6017 } else if (cmethod->klass == mono_defaults.array_class) {
6018 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6019 return emit_array_generic_access (cfg, fsig, args, FALSE);
6020 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6021 return emit_array_generic_access (cfg, fsig, args, TRUE);
6023 #ifndef MONO_BIG_ARRAYS
6025 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6028 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6029 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6030 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6031 int dreg = alloc_ireg (cfg);
6032 int bounds_reg = alloc_ireg_mp (cfg);
6033 MonoBasicBlock *end_bb, *szarray_bb;
6034 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6036 NEW_BBLOCK (cfg, end_bb);
6037 NEW_BBLOCK (cfg, szarray_bb);
6039 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6040 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6043 /* Non-szarray case */
6045 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6046 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6051 MONO_START_BB (cfg, szarray_bb);
6054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6055 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6057 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6058 MONO_START_BB (cfg, end_bb);
6060 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6061 ins->type = STACK_I4;
6067 if (cmethod->name [0] != 'g')
6070 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6071 int dreg = alloc_ireg (cfg);
6072 int vtable_reg = alloc_preg (cfg);
6073 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6074 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6076 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6077 type_from_op (cfg, ins, NULL, NULL);
6080 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6081 int dreg = alloc_ireg (cfg);
6083 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6084 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6085 type_from_op (cfg, ins, NULL, NULL);
6090 } else if (cmethod->klass == runtime_helpers_class) {
6091 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6092 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6096 } else if (cmethod->klass == mono_defaults.monitor_class) {
6097 gboolean is_enter = FALSE;
6098 gboolean is_v4 = FALSE;
6100 if (!strcmp (cmethod->name, "enter_with_atomic_var") && mono_method_signature (cmethod)->param_count == 2) {
6104 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6109 * To make async stack traces work, icalls which can block should have a wrapper.
6110 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6112 MonoBasicBlock *end_bb;
6114 NEW_BBLOCK (cfg, end_bb);
6116 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6119 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6120 MONO_START_BB (cfg, end_bb);
6123 } else if (cmethod->klass == mono_defaults.thread_class) {
6124 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6125 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6126 MONO_ADD_INS (cfg->cbb, ins);
6128 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6129 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6130 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6132 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6134 if (fsig->params [0]->type == MONO_TYPE_I1)
6135 opcode = OP_LOADI1_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_U1)
6137 opcode = OP_LOADU1_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_I2)
6139 opcode = OP_LOADI2_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_U2)
6141 opcode = OP_LOADU2_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_I4)
6143 opcode = OP_LOADI4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_U4)
6145 opcode = OP_LOADU4_MEMBASE;
6146 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6147 opcode = OP_LOADI8_MEMBASE;
6148 else if (fsig->params [0]->type == MONO_TYPE_R4)
6149 opcode = OP_LOADR4_MEMBASE;
6150 else if (fsig->params [0]->type == MONO_TYPE_R8)
6151 opcode = OP_LOADR8_MEMBASE;
6152 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6153 opcode = OP_LOAD_MEMBASE;
6156 MONO_INST_NEW (cfg, ins, opcode);
6157 ins->inst_basereg = args [0]->dreg;
6158 ins->inst_offset = 0;
6159 MONO_ADD_INS (cfg->cbb, ins);
6161 switch (fsig->params [0]->type) {
6168 ins->dreg = mono_alloc_ireg (cfg);
6169 ins->type = STACK_I4;
6173 ins->dreg = mono_alloc_lreg (cfg);
6174 ins->type = STACK_I8;
6178 ins->dreg = mono_alloc_ireg (cfg);
6179 #if SIZEOF_REGISTER == 8
6180 ins->type = STACK_I8;
6182 ins->type = STACK_I4;
6187 ins->dreg = mono_alloc_freg (cfg);
6188 ins->type = STACK_R8;
6191 g_assert (mini_type_is_reference (fsig->params [0]));
6192 ins->dreg = mono_alloc_ireg_ref (cfg);
6193 ins->type = STACK_OBJ;
6197 if (opcode == OP_LOADI8_MEMBASE)
6198 ins = mono_decompose_opcode (cfg, ins);
6200 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6204 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6206 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6208 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6209 opcode = OP_STOREI1_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6211 opcode = OP_STOREI2_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6213 opcode = OP_STOREI4_MEMBASE_REG;
6214 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6215 opcode = OP_STOREI8_MEMBASE_REG;
6216 else if (fsig->params [0]->type == MONO_TYPE_R4)
6217 opcode = OP_STORER4_MEMBASE_REG;
6218 else if (fsig->params [0]->type == MONO_TYPE_R8)
6219 opcode = OP_STORER8_MEMBASE_REG;
6220 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6221 opcode = OP_STORE_MEMBASE_REG;
6224 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6226 MONO_INST_NEW (cfg, ins, opcode);
6227 ins->sreg1 = args [1]->dreg;
6228 ins->inst_destbasereg = args [0]->dreg;
6229 ins->inst_offset = 0;
6230 MONO_ADD_INS (cfg->cbb, ins);
6232 if (opcode == OP_STOREI8_MEMBASE_REG)
6233 ins = mono_decompose_opcode (cfg, ins);
6238 } else if (cmethod->klass->image == mono_defaults.corlib &&
6239 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6240 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6243 #if SIZEOF_REGISTER == 8
6244 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6245 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6246 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6247 ins->dreg = mono_alloc_preg (cfg);
6248 ins->sreg1 = args [0]->dreg;
6249 ins->type = STACK_I8;
6250 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6251 MONO_ADD_INS (cfg->cbb, ins);
6255 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6257 /* 64 bit reads are already atomic */
6258 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6259 load_ins->dreg = mono_alloc_preg (cfg);
6260 load_ins->inst_basereg = args [0]->dreg;
6261 load_ins->inst_offset = 0;
6262 load_ins->type = STACK_I8;
6263 MONO_ADD_INS (cfg->cbb, load_ins);
6265 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6272 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6273 MonoInst *ins_iconst;
6276 if (fsig->params [0]->type == MONO_TYPE_I4) {
6277 opcode = OP_ATOMIC_ADD_I4;
6278 cfg->has_atomic_add_i4 = TRUE;
6280 #if SIZEOF_REGISTER == 8
6281 else if (fsig->params [0]->type == MONO_TYPE_I8)
6282 opcode = OP_ATOMIC_ADD_I8;
6285 if (!mono_arch_opcode_supported (opcode))
6287 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6288 ins_iconst->inst_c0 = 1;
6289 ins_iconst->dreg = mono_alloc_ireg (cfg);
6290 MONO_ADD_INS (cfg->cbb, ins_iconst);
6292 MONO_INST_NEW (cfg, ins, opcode);
6293 ins->dreg = mono_alloc_ireg (cfg);
6294 ins->inst_basereg = args [0]->dreg;
6295 ins->inst_offset = 0;
6296 ins->sreg2 = ins_iconst->dreg;
6297 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6298 MONO_ADD_INS (cfg->cbb, ins);
6300 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6301 MonoInst *ins_iconst;
6304 if (fsig->params [0]->type == MONO_TYPE_I4) {
6305 opcode = OP_ATOMIC_ADD_I4;
6306 cfg->has_atomic_add_i4 = TRUE;
6308 #if SIZEOF_REGISTER == 8
6309 else if (fsig->params [0]->type == MONO_TYPE_I8)
6310 opcode = OP_ATOMIC_ADD_I8;
6313 if (!mono_arch_opcode_supported (opcode))
6315 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6316 ins_iconst->inst_c0 = -1;
6317 ins_iconst->dreg = mono_alloc_ireg (cfg);
6318 MONO_ADD_INS (cfg->cbb, ins_iconst);
6320 MONO_INST_NEW (cfg, ins, opcode);
6321 ins->dreg = mono_alloc_ireg (cfg);
6322 ins->inst_basereg = args [0]->dreg;
6323 ins->inst_offset = 0;
6324 ins->sreg2 = ins_iconst->dreg;
6325 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6326 MONO_ADD_INS (cfg->cbb, ins);
6328 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6331 if (fsig->params [0]->type == MONO_TYPE_I4) {
6332 opcode = OP_ATOMIC_ADD_I4;
6333 cfg->has_atomic_add_i4 = TRUE;
6335 #if SIZEOF_REGISTER == 8
6336 else if (fsig->params [0]->type == MONO_TYPE_I8)
6337 opcode = OP_ATOMIC_ADD_I8;
6340 if (!mono_arch_opcode_supported (opcode))
6342 MONO_INST_NEW (cfg, ins, opcode);
6343 ins->dreg = mono_alloc_ireg (cfg);
6344 ins->inst_basereg = args [0]->dreg;
6345 ins->inst_offset = 0;
6346 ins->sreg2 = args [1]->dreg;
6347 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6348 MONO_ADD_INS (cfg->cbb, ins);
6351 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6352 MonoInst *f2i = NULL, *i2f;
6353 guint32 opcode, f2i_opcode, i2f_opcode;
6354 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6355 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6357 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6358 fsig->params [0]->type == MONO_TYPE_R4) {
6359 opcode = OP_ATOMIC_EXCHANGE_I4;
6360 f2i_opcode = OP_MOVE_F_TO_I4;
6361 i2f_opcode = OP_MOVE_I4_TO_F;
6362 cfg->has_atomic_exchange_i4 = TRUE;
6364 #if SIZEOF_REGISTER == 8
6366 fsig->params [0]->type == MONO_TYPE_I8 ||
6367 fsig->params [0]->type == MONO_TYPE_R8 ||
6368 fsig->params [0]->type == MONO_TYPE_I) {
6369 opcode = OP_ATOMIC_EXCHANGE_I8;
6370 f2i_opcode = OP_MOVE_F_TO_I8;
6371 i2f_opcode = OP_MOVE_I8_TO_F;
6374 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6375 opcode = OP_ATOMIC_EXCHANGE_I4;
6376 cfg->has_atomic_exchange_i4 = TRUE;
6382 if (!mono_arch_opcode_supported (opcode))
6386 /* TODO: Decompose these opcodes instead of bailing here. */
6387 if (COMPILE_SOFT_FLOAT (cfg))
6390 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6391 f2i->dreg = mono_alloc_ireg (cfg);
6392 f2i->sreg1 = args [1]->dreg;
6393 if (f2i_opcode == OP_MOVE_F_TO_I4)
6394 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6395 MONO_ADD_INS (cfg->cbb, f2i);
6398 MONO_INST_NEW (cfg, ins, opcode);
6399 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6400 ins->inst_basereg = args [0]->dreg;
6401 ins->inst_offset = 0;
6402 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6403 MONO_ADD_INS (cfg->cbb, ins);
6405 switch (fsig->params [0]->type) {
6407 ins->type = STACK_I4;
6410 ins->type = STACK_I8;
6413 #if SIZEOF_REGISTER == 8
6414 ins->type = STACK_I8;
6416 ins->type = STACK_I4;
6421 ins->type = STACK_R8;
6424 g_assert (mini_type_is_reference (fsig->params [0]));
6425 ins->type = STACK_OBJ;
6430 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6431 i2f->dreg = mono_alloc_freg (cfg);
6432 i2f->sreg1 = ins->dreg;
6433 i2f->type = STACK_R8;
6434 if (i2f_opcode == OP_MOVE_I4_TO_F)
6435 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6436 MONO_ADD_INS (cfg->cbb, i2f);
6441 if (cfg->gen_write_barriers && is_ref)
6442 emit_write_barrier (cfg, args [0], args [1]);
6444 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6445 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6446 guint32 opcode, f2i_opcode, i2f_opcode;
6447 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6448 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6450 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6451 fsig->params [1]->type == MONO_TYPE_R4) {
6452 opcode = OP_ATOMIC_CAS_I4;
6453 f2i_opcode = OP_MOVE_F_TO_I4;
6454 i2f_opcode = OP_MOVE_I4_TO_F;
6455 cfg->has_atomic_cas_i4 = TRUE;
6457 #if SIZEOF_REGISTER == 8
6459 fsig->params [1]->type == MONO_TYPE_I8 ||
6460 fsig->params [1]->type == MONO_TYPE_R8 ||
6461 fsig->params [1]->type == MONO_TYPE_I) {
6462 opcode = OP_ATOMIC_CAS_I8;
6463 f2i_opcode = OP_MOVE_F_TO_I8;
6464 i2f_opcode = OP_MOVE_I8_TO_F;
6467 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6468 opcode = OP_ATOMIC_CAS_I4;
6469 cfg->has_atomic_cas_i4 = TRUE;
6475 if (!mono_arch_opcode_supported (opcode))
6479 /* TODO: Decompose these opcodes instead of bailing here. */
6480 if (COMPILE_SOFT_FLOAT (cfg))
6483 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6484 f2i_new->dreg = mono_alloc_ireg (cfg);
6485 f2i_new->sreg1 = args [1]->dreg;
6486 if (f2i_opcode == OP_MOVE_F_TO_I4)
6487 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6488 MONO_ADD_INS (cfg->cbb, f2i_new);
6490 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6491 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6492 f2i_cmp->sreg1 = args [2]->dreg;
6493 if (f2i_opcode == OP_MOVE_F_TO_I4)
6494 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6495 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6498 MONO_INST_NEW (cfg, ins, opcode);
6499 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6500 ins->sreg1 = args [0]->dreg;
6501 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6502 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6503 MONO_ADD_INS (cfg->cbb, ins);
6505 switch (fsig->params [1]->type) {
6507 ins->type = STACK_I4;
6510 ins->type = STACK_I8;
6513 #if SIZEOF_REGISTER == 8
6514 ins->type = STACK_I8;
6516 ins->type = STACK_I4;
6520 ins->type = cfg->r4_stack_type;
6523 ins->type = STACK_R8;
6526 g_assert (mini_type_is_reference (fsig->params [1]));
6527 ins->type = STACK_OBJ;
6532 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6533 i2f->dreg = mono_alloc_freg (cfg);
6534 i2f->sreg1 = ins->dreg;
6535 i2f->type = STACK_R8;
6536 if (i2f_opcode == OP_MOVE_I4_TO_F)
6537 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6538 MONO_ADD_INS (cfg->cbb, i2f);
6543 if (cfg->gen_write_barriers && is_ref)
6544 emit_write_barrier (cfg, args [0], args [1]);
6546 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6547 fsig->params [1]->type == MONO_TYPE_I4) {
6548 MonoInst *cmp, *ceq;
6550 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6553 /* int32 r = CAS (location, value, comparand); */
6554 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6555 ins->dreg = alloc_ireg (cfg);
6556 ins->sreg1 = args [0]->dreg;
6557 ins->sreg2 = args [1]->dreg;
6558 ins->sreg3 = args [2]->dreg;
6559 ins->type = STACK_I4;
6560 MONO_ADD_INS (cfg->cbb, ins);
6562 /* bool result = r == comparand; */
6563 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6564 cmp->sreg1 = ins->dreg;
6565 cmp->sreg2 = args [2]->dreg;
6566 cmp->type = STACK_I4;
6567 MONO_ADD_INS (cfg->cbb, cmp);
6569 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6570 ceq->dreg = alloc_ireg (cfg);
6571 ceq->type = STACK_I4;
6572 MONO_ADD_INS (cfg->cbb, ceq);
6574 /* *success = result; */
6575 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6577 cfg->has_atomic_cas_i4 = TRUE;
6579 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6580 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6584 } else if (cmethod->klass->image == mono_defaults.corlib &&
6585 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6586 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6589 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6591 MonoType *t = fsig->params [0];
6593 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6595 g_assert (t->byref);
6596 /* t is a byref type, so the reference check is more complicated */
6597 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6598 if (t->type == MONO_TYPE_I1)
6599 opcode = OP_ATOMIC_LOAD_I1;
6600 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6601 opcode = OP_ATOMIC_LOAD_U1;
6602 else if (t->type == MONO_TYPE_I2)
6603 opcode = OP_ATOMIC_LOAD_I2;
6604 else if (t->type == MONO_TYPE_U2)
6605 opcode = OP_ATOMIC_LOAD_U2;
6606 else if (t->type == MONO_TYPE_I4)
6607 opcode = OP_ATOMIC_LOAD_I4;
6608 else if (t->type == MONO_TYPE_U4)
6609 opcode = OP_ATOMIC_LOAD_U4;
6610 else if (t->type == MONO_TYPE_R4)
6611 opcode = OP_ATOMIC_LOAD_R4;
6612 else if (t->type == MONO_TYPE_R8)
6613 opcode = OP_ATOMIC_LOAD_R8;
6614 #if SIZEOF_REGISTER == 8
6615 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6616 opcode = OP_ATOMIC_LOAD_I8;
6617 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6618 opcode = OP_ATOMIC_LOAD_U8;
6620 else if (t->type == MONO_TYPE_I)
6621 opcode = OP_ATOMIC_LOAD_I4;
6622 else if (is_ref || t->type == MONO_TYPE_U)
6623 opcode = OP_ATOMIC_LOAD_U4;
6627 if (!mono_arch_opcode_supported (opcode))
6630 MONO_INST_NEW (cfg, ins, opcode);
6631 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6632 ins->sreg1 = args [0]->dreg;
6633 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6634 MONO_ADD_INS (cfg->cbb, ins);
6637 case MONO_TYPE_BOOLEAN:
6644 ins->type = STACK_I4;
6648 ins->type = STACK_I8;
6652 #if SIZEOF_REGISTER == 8
6653 ins->type = STACK_I8;
6655 ins->type = STACK_I4;
6659 ins->type = cfg->r4_stack_type;
6662 ins->type = STACK_R8;
6666 ins->type = STACK_OBJ;
6672 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6674 MonoType *t = fsig->params [0];
6677 g_assert (t->byref);
6678 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6679 if (t->type == MONO_TYPE_I1)
6680 opcode = OP_ATOMIC_STORE_I1;
6681 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6682 opcode = OP_ATOMIC_STORE_U1;
6683 else if (t->type == MONO_TYPE_I2)
6684 opcode = OP_ATOMIC_STORE_I2;
6685 else if (t->type == MONO_TYPE_U2)
6686 opcode = OP_ATOMIC_STORE_U2;
6687 else if (t->type == MONO_TYPE_I4)
6688 opcode = OP_ATOMIC_STORE_I4;
6689 else if (t->type == MONO_TYPE_U4)
6690 opcode = OP_ATOMIC_STORE_U4;
6691 else if (t->type == MONO_TYPE_R4)
6692 opcode = OP_ATOMIC_STORE_R4;
6693 else if (t->type == MONO_TYPE_R8)
6694 opcode = OP_ATOMIC_STORE_R8;
6695 #if SIZEOF_REGISTER == 8
6696 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6697 opcode = OP_ATOMIC_STORE_I8;
6698 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6699 opcode = OP_ATOMIC_STORE_U8;
6701 else if (t->type == MONO_TYPE_I)
6702 opcode = OP_ATOMIC_STORE_I4;
6703 else if (is_ref || t->type == MONO_TYPE_U)
6704 opcode = OP_ATOMIC_STORE_U4;
6708 if (!mono_arch_opcode_supported (opcode))
6711 MONO_INST_NEW (cfg, ins, opcode);
6712 ins->dreg = args [0]->dreg;
6713 ins->sreg1 = args [1]->dreg;
6714 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6715 MONO_ADD_INS (cfg->cbb, ins);
6717 if (cfg->gen_write_barriers && is_ref)
6718 emit_write_barrier (cfg, args [0], args [1]);
6724 } else if (cmethod->klass->image == mono_defaults.corlib &&
6725 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6726 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6727 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6728 if (should_insert_brekpoint (cfg->method)) {
6729 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6731 MONO_INST_NEW (cfg, ins, OP_NOP);
6732 MONO_ADD_INS (cfg->cbb, ins);
6736 } else if (cmethod->klass->image == mono_defaults.corlib &&
6737 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6738 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6739 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6741 EMIT_NEW_ICONST (cfg, ins, 1);
6743 EMIT_NEW_ICONST (cfg, ins, 0);
6746 } else if (cmethod->klass->image == mono_defaults.corlib &&
6747 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6748 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6749 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6750 /* No stack walks are currently available, so implement this as an intrinsic */
6751 MonoInst *assembly_ins;
6753 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6754 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6757 } else if (cmethod->klass->image == mono_defaults.corlib &&
6758 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6759 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6760 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6761 /* No stack walks are currently available, so implement this as an intrinsic */
6762 MonoInst *method_ins;
6763 MonoMethod *declaring = cfg->method;
6765 /* This returns the declaring generic method */
6766 if (declaring->is_inflated)
6767 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6768 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6769 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6770 cfg->no_inline = TRUE;
6771 if (cfg->method != cfg->current_method)
6772 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6775 } else if (cmethod->klass == mono_defaults.math_class) {
6777 * There is general branchless code for Min/Max, but it does not work for
6779 * http://everything2.com/?node_id=1051618
6781 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6782 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6783 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6784 !strcmp (cmethod->klass->name, "Selector")) ||
6785 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6786 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6787 !strcmp (cmethod->klass->name, "Selector"))
6789 if (cfg->backend->have_objc_get_selector &&
6790 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6791 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6792 cfg->compile_aot && !cfg->llvm_only) {
6794 MonoJumpInfoToken *ji;
6799 cfg->exception_message = g_strdup ("GetHandle");
6800 cfg->disable_llvm = TRUE;
6802 if (args [0]->opcode == OP_GOT_ENTRY) {
6803 pi = (MonoInst *)args [0]->inst_p1;
6804 g_assert (pi->opcode == OP_PATCH_INFO);
6805 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6806 ji = (MonoJumpInfoToken *)pi->inst_p0;
6808 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6809 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6812 NULLIFY_INS (args [0]);
6815 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6816 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6817 ins->dreg = mono_alloc_ireg (cfg);
6819 ins->inst_p0 = mono_string_to_utf8 (s);
6820 MONO_ADD_INS (cfg->cbb, ins);
6825 #ifdef MONO_ARCH_SIMD_INTRINSICS
6826 if (cfg->opt & MONO_OPT_SIMD) {
6827 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6833 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6837 if (COMPILE_LLVM (cfg)) {
6838 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6843 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6847 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call described by METHOD/SIGNATURE/ARGS to a JIT-internal
 * replacement when one exists, returning the emitted call instruction.
 * Visible case: String.InternalAllocateStr is redirected to the managed
 * GC allocator, skipped when allocation profiling or shared (domain-neutral)
 * code is enabled.  (Fall-through return is in elided lines.)
 */
6850 inline static MonoInst*
6851 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6852 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6854 if (method->klass == mono_defaults.string_class) {
6855 /* managed string allocation support */
6856 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6857 MonoInst *iargs [2];
6858 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6859 MonoMethod *managed_alloc = NULL;
6861 g_assert (vtable); /* Should not fail since it is System.String */
6862 #ifndef MONO_CROSS_COMPILE
6863 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* call the managed allocator with (vtable, requested length) */
6867 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6868 iargs [1] = args [0];
6869 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Used when inlining: store each incoming stack value SP [i] into a newly
 * created local variable so the inlined body can address its arguments as
 * ordinary variables.  Overwrites cfg->args [] entries with the new vars;
 * the caller (inline_method) saves and restores cfg->args around this.
 */
6876 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6878 MonoInst *store, *temp;
6881 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for the implicit 'this' slot the static type is taken from the stack value */
6882 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6885 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6886 * would be different than the MonoInst's used to represent arguments, and
6887 * the ldelema implementation can't deal with that.
6888 * Solution: When ldelema is used on an inline argument, create a var for
6889 * it, emit ldelema on that var, and emit the saving code below in
6890 * inline_method () if needed.
6892 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6893 cfg->args [i] = temp;
6894 /* This uses cfg->args [i] which is set by the preceding line */
6895 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6896 store->cil_code = sp [0]->cil_code;
6901 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6902 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6904 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only permit inlining of callees whose full name starts
 * with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * The env var is read once and cached in a function-local static.
 */
6906 check_inline_called_method_name_limit (MonoMethod *called_method)
6909 static const char *limit = NULL;
6911 if (limit == NULL) {
6912 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6914 if (limit_string != NULL)
6915 limit = limit_string;
6920 if (limit [0] != '\0') {
6921 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match: 0 iff called_method_name starts with limit */
6923 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6924 g_free (called_method_name);
6926 //return (strncmp_result <= 0);
6927 return (strncmp_result == 0);
6934 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit ():
 * only permit inlining when the *caller*'s full name starts with the prefix
 * in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT (read once, cached in a static).
 */
6936 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6939 static const char *limit = NULL;
6941 if (limit == NULL) {
6942 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6943 if (limit_string != NULL) {
6944 limit = limit_string;
6950 if (limit [0] != '\0') {
6951 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match: 0 iff caller_method_name starts with limit */
6953 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6954 g_free (caller_method_name);
6956 //return (strncmp_result <= 0);
6957 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes vreg DREG to the zero value of RTYPE:
 * NULL for pointers/references, 0/0L for integers, 0.0f/0.0 for floats,
 * and a VZERO for value types.
 */
6965 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* statics so OP_R4CONST/OP_R8CONST can point at storage that outlives this call */
6967 static double r8_0 = 0.0;
6968 static float r4_0 = 0.0;
6972 rtype = mini_get_underlying_type (rtype);
6976 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6977 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6978 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6979 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6980 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6981 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp mode keeps R4 values in single precision, so use a real R4 const */
6982 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6983 ins->type = STACK_R4;
6984 ins->inst_p0 = (void*)&r4_0;
6986 MONO_ADD_INS (cfg->cbb, ins);
6987 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6988 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6989 ins->type = STACK_R8;
6990 ins->inst_p0 = (void*)&r8_0;
6992 MONO_ADD_INS (cfg->cbb, ins);
6993 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6994 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6995 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6996 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* gshared type variables known to be valuetypes also get a VZERO */
6997 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else (references etc.) defaults to NULL */
6999 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initialization for DREG so the IR stays valid without
 * performing a real store (used when locals-init is disabled).  Types with
 * no dummy opcode fall back to a real emit_init_rvar ().
 * Mirrors the type dispatch in emit_init_rvar () — keep the two in sync.
 */
7004 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7008 rtype = mini_get_underlying_type (rtype);
7012 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7013 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7014 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7015 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7016 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7017 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7018 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7019 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7020 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7021 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7022 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7023 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7024 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7025 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* no dummy opcode for this type: do a real zero-init instead */
7027 emit_init_rvar (cfg, dreg, rtype);
7031 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float the
 * value is built in a fresh vreg and then stored to the local; otherwise
 * the local's dreg is initialized directly.
 */
7033 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7035 MonoInst *var = cfg->locals [local];
7036 if (COMPILE_SOFT_FLOAT (cfg)) {
7038 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7039 emit_init_rvar (cfg, reg, type);
/* store the just-emitted init value (cbb->last_ins) into the local */
7040 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7043 emit_init_rvar (cfg, var->dreg, type);
7045 emit_dummy_init_rvar (cfg, var->dreg, type);
7052 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP by recursively invoking
 * mono_method_to_ir () between freshly created start/end bblocks, then
 * merging those bblocks back into the caller's CFG when the cost is
 * acceptable.  Returns the inline cost (>= 0 on success; see the
 * costs check below).  On abort the new bblocks are discarded.
 */
7055 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7056 guchar *ip, guint real_offset, gboolean inline_always)
7058 MonoInst *ins, *rvar = NULL;
7059 MonoMethodHeader *cheader;
7060 MonoBasicBlock *ebblock, *sbblock;
7062 MonoMethod *prev_inlined_method;
7063 MonoInst **prev_locals, **prev_args;
7064 MonoType **prev_arg_types;
7065 guint prev_real_offset;
7066 GHashTable *prev_cbb_hash;
7067 MonoBasicBlock **prev_cil_offset_to_bb;
7068 MonoBasicBlock *prev_cbb;
7069 unsigned char* prev_cil_start;
7070 guint32 prev_cil_offset_to_bb_len;
7071 MonoMethod *prev_current_method;
7072 MonoGenericContext *prev_generic_context;
7073 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7075 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
7077 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7078 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7081 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7082 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7087 fsig = mono_method_signature (cmethod);
7089 if (cfg->verbose_level > 2)
7090 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7092 if (!cmethod->inline_info) {
7093 cfg->stat_inlineable_methods++;
7094 cmethod->inline_info = 1;
7097 /* allocate local variables */
7098 cheader = mono_method_get_header (cmethod);
7100 if (cheader == NULL || mono_loader_get_last_error ()) {
7102 mono_metadata_free_mh (cheader);
7103 if (inline_always && mono_loader_get_last_error ()) {
7104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7105 mono_error_set_from_loader_error (&cfg->error);
7108 mono_loader_clear_error ();
7112 /*Must verify before creating locals as it can cause the JIT to assert.*/
7113 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7114 mono_metadata_free_mh (cheader);
7118 /* allocate space to store the return value */
7119 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7120 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7123 prev_locals = cfg->locals;
7124 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7125 for (i = 0; i < cheader->num_locals; ++i)
7126 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7128 /* allocate start and end blocks */
7129 /* This is needed so if the inline is aborted, we can clean up */
7130 NEW_BBLOCK (cfg, sbblock);
7131 sbblock->real_offset = real_offset;
7133 NEW_BBLOCK (cfg, ebblock);
7134 ebblock->block_num = cfg->num_bblocks++;
7135 ebblock->real_offset = real_offset;
/* Save the per-method compilation state before recursing into the callee */
7137 prev_args = cfg->args;
7138 prev_arg_types = cfg->arg_types;
7139 prev_inlined_method = cfg->inlined_method;
7140 cfg->inlined_method = cmethod;
7141 cfg->ret_var_set = FALSE;
7142 cfg->inline_depth ++;
7143 prev_real_offset = cfg->real_offset;
7144 prev_cbb_hash = cfg->cbb_hash;
7145 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7146 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7147 prev_cil_start = cfg->cil_start;
7148 prev_cbb = cfg->cbb;
7149 prev_current_method = cfg->current_method;
7150 prev_generic_context = cfg->generic_context;
7151 prev_ret_var_set = cfg->ret_var_set;
7152 prev_disable_inline = cfg->disable_inline;
7154 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* recursive IR conversion of the callee body; returns the inline cost */
7157 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7159 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state */
7161 cfg->inlined_method = prev_inlined_method;
7162 cfg->real_offset = prev_real_offset;
7163 cfg->cbb_hash = prev_cbb_hash;
7164 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7165 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7166 cfg->cil_start = prev_cil_start;
7167 cfg->locals = prev_locals;
7168 cfg->args = prev_args;
7169 cfg->arg_types = prev_arg_types;
7170 cfg->current_method = prev_current_method;
7171 cfg->generic_context = prev_generic_context;
7172 cfg->ret_var_set = prev_ret_var_set;
7173 cfg->disable_inline = prev_disable_inline;
7174 cfg->inline_depth --;
/* accept the inline if cheap enough, forced, or marked AggressiveInlining */
7176 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7177 if (cfg->verbose_level > 2)
7178 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7180 cfg->stat_inlined_methods++;
7182 /* always add some code to avoid block split failures */
7183 MONO_INST_NEW (cfg, ins, OP_NOP);
7184 MONO_ADD_INS (prev_cbb, ins);
7186 prev_cbb->next_bb = sbblock;
7187 link_bblock (cfg, prev_cbb, sbblock);
7190 * Get rid of the begin and end bblocks if possible to aid local
7193 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7195 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7196 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7198 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7199 MonoBasicBlock *prev = ebblock->in_bb [0];
7201 if (prev->next_bb == ebblock) {
7202 mono_merge_basic_blocks (cfg, prev, ebblock);
7204 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7205 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7206 cfg->cbb = prev_cbb;
7209 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7214 * It's possible that the rvar is set in some prev bblock, but not in others.
7220 for (i = 0; i < ebblock->in_count; ++i) {
7221 bb = ebblock->in_bb [i];
7223 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7226 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7236 * If the inlined method contains only a throw, then the ret var is not
7237 * set, so set it to a dummy value.
7240 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7242 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7245 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7248 if (cfg->verbose_level > 2)
7249 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7250 cfg->exception_type = MONO_EXCEPTION_NONE;
7251 mono_loader_clear_error ();
7253 /* This gets rid of the newly added bblocks */
7254 cfg->cbb = prev_cbb;
7256 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7261 * Some of these comments may well be out-of-date.
7262 * Design decisions: we do a single pass over the IL code (and we do bblock
7263 * splitting/merging in the few cases when it's required: a back jump to an IL
7264 * address that was not already seen as bblock starting point).
7265 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7266 * Complex operations are decomposed in simpler ones right away. We need to let the
7267 * arch-specific code peek and poke inside this process somehow (except when the
7268 * optimizations can take advantage of the full semantic info of coarse opcodes).
7269 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7270 * MonoInst->opcode initially is the IL opcode or some simplification of that
7271 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7272 * opcode with value bigger than OP_LAST.
7273 * At this point the IR can be handed over to an interpreter, a dumb code generator
7274 * or to the optimizing code generator that will translate it to SSA form.
7276 * Profiling directed optimizations.
7277 * We may compile by default with few or no optimizations and instrument the code
7278 * or the user may indicate what methods to optimize the most either in a config file
7279 * or through repeated runs where the compiler applies offline the optimizations to
7280 * each method and then decides if it was worth it.
7283 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7284 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7285 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7286 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7287 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7288 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7289 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7290 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7292 /* offset from br.s -> br like opcodes */
7293 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that IL offset.
 */
7296 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7298 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7300 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): walk the opcodes and create
 * MonoBasicBlocks (via GET_BBLOCK) at every branch/switch target and at the
 * instruction following each branch.  Also marks the bblock containing a
 * CEE_THROW as out_of_line so it can be laid out as cold code.
 */
7304 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7306 unsigned char *ip = start;
7307 unsigned char *target;
7310 MonoBasicBlock *bblock;
7311 const MonoOpcode *opcode;
7314 cli_addr = ip - start;
7315 i = mono_opcode_value ((const guint8 **)&ip, end);
7318 opcode = &mono_opcodes [i];
/* advance IP by the operand size of each opcode class; only branch
 * operands create bblocks */
7319 switch (opcode->argument) {
7320 case MonoInlineNone:
7323 case MonoInlineString:
7324 case MonoInlineType:
7325 case MonoInlineField:
7326 case MonoInlineMethod:
7329 case MonoShortInlineR:
7336 case MonoShortInlineVar:
7337 case MonoShortInlineI:
7340 case MonoShortInlineBrTarget:
/* short branch: 1-byte signed displacement relative to next instruction */
7341 target = start + cli_addr + 2 + (signed char)ip [1];
7342 GET_BBLOCK (cfg, bblock, target);
7345 GET_BBLOCK (cfg, bblock, ip);
7347 case MonoInlineBrTarget:
/* long branch: 4-byte signed displacement relative to next instruction */
7348 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7349 GET_BBLOCK (cfg, bblock, target);
7352 GET_BBLOCK (cfg, bblock, ip);
7354 case MonoInlineSwitch: {
7355 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
7358 cli_addr += 5 + 4 * n;
7359 target = start + cli_addr;
7360 GET_BBLOCK (cfg, bblock, target);
7362 for (j = 0; j < n; ++j) {
7363 target = start + cli_addr + (gint32)read32 (ip);
7364 GET_BBLOCK (cfg, bblock, target);
7374 g_assert_not_reached ();
7377 if (i == CEE_THROW) {
7378 unsigned char *bb_start = ip - 1;
7380 /* Find the start of the bblock containing the throw */
7382 while ((bb_start >= start) && !bblock) {
7383 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7387 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of M to a MonoMethod, allowing open
 * (uninstantiated) generic methods.  For wrappers the method is fetched
 * from the wrapper data and inflated with CONTEXT; otherwise it is looked
 * up through the metadata of M's image.  Errors are reported via ERROR.
 */
7397 static inline MonoMethod *
7398 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7402 mono_error_init (error);
7404 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7405 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7407 method = mono_class_inflate_generic_method_checked (method, context, error);
7410 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but rejects methods on open
 * constructed types unless we are compiling gshared code.  When CFG is
 * non-NULL errors land in cfg->error; with a NULL CFG a local error is
 * used and swallowed (see FIXME below).
 */
7416 static inline MonoMethod *
7417 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7420 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7422 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7423 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7427 if (!method && !cfg)
7428 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT.  Wrappers resolve through their wrapper data; normal methods go
 * through the typespec lookup on the method's image.  The class is
 * initialized before returning.
 */
7433 static inline MonoClass*
7434 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7439 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7440 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7442 klass = mono_class_inflate_generic_class (klass, context);
7444 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7445 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7448 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD,
 * inflating it with CONTEXT.  Inflation failure is treated as fatal
 * (asserted) here.
 */
7452 static inline MonoMethodSignature*
7453 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7455 MonoMethodSignature *fsig;
7457 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7458 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7460 fsig = mono_metadata_parse_signature (method->klass->image, token);
7464 fsig = mono_inflate_generic_signature(fsig, context, &error);
7466 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return the SecurityManager.ThrowException (1-arg) method, resolved once
 * and cached in a function-local static.
 * NOTE(review): no synchronization is visible around the static cache here —
 * presumably callers serialize via a JIT-level lock; confirm.
 */
7472 throw_exception (void)
7474 static MonoMethod *method = NULL;
7477 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7478 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR which throws the (pre-created) exception object EX at runtime
 * by calling SecurityManager.ThrowException with EX as its argument.
 */
7485 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7487 MonoMethod *thrower = throw_exception ();
7490 EMIT_NEW_PCONST (cfg, args [0], ex);
7491 mono_emit_method_call (cfg, thrower, args, NULL);
7495 * Return the original method if a wrapper is specified. We can only access
7496 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Non-wrappers and native-to-managed wrappers are returned as-is
 * (the elided returns); other wrappers are mapped back to the method
 * they wrap.
 */
7499 get_original_method (MonoMethod *method)
7501 if (method->wrapper_type == MONO_WRAPPER_NONE)
7504 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7505 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7508 /* in other cases we need to find the original method */
7509 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR which
 * throws the corresponding security exception at runtime.
 */
7513 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7515 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7516 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7518 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, companion of the field variant above: if CALLER
 * may not call CALLEE, emit IR which throws the security exception at runtime.
 */
7522 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7524 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7525 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7527 emit_throw_exception (cfg, ex);
7531 * Check that the IL instructions at ip are the array initialization
7532 * sequence and return the pointer to the data and the size.
7535 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Pattern being matched (emitted by compilers for array initializers):
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray (...)
 */
7538 * newarr[System.Int32]
7540 * ldtoken field valuetype ...
7541 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7543 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7545 guint32 token = read32 (ip + 7);
7546 guint32 field_token = read32 (ip + 2);
7547 guint32 field_index = field_token & 0xffffff;
7549 const char *data_ptr;
7551 MonoMethod *cmethod;
7552 MonoClass *dummy_class;
7553 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7557 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7561 *out_field_token = field_token;
7563 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* verify the call target really is corlib's RuntimeHelpers.InitializeArray */
7566 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7568 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7569 case MONO_TYPE_BOOLEAN:
7573 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7574 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7575 case MONO_TYPE_CHAR:
/* the RVA blob must be at least as large as the array data we will copy */
7592 if (size > mono_type_size (field->type, &dummy_align))
7595 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7596 if (!image_is_dynamic (method->klass->image)) {
7597 field_index = read32 (ip + 2) & 0xffffff;
7598 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7599 data_ptr = mono_image_rva_map (method->klass->image, rva);
7600 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7601 /* for aot code we do the lookup on load */
7602 if (aot && data_ptr)
7603 return (const char *)GUINT_TO_POINTER (rva);
7605 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7607 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP in METHOD, including a disassembly of the offending instruction (or a
 * note that the body is empty).
 */
7615 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7617 char *method_fname = mono_method_full_name (method, TRUE);
7619 MonoMethodHeader *header = mono_method_get_header (method);
7621 if (header->code_size == 0)
7622 method_code = g_strdup ("method body is empty.");
7624 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7625 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7626 g_free (method_fname);
7627 g_free (method_code);
/* header is freed later with the rest of the cfg's headers */
7628 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N.  When the value
 * is a freshly emitted integer constant, the store is elided by retargeting
 * the constant's dreg at the local directly.
 */
7632 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7635 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7636 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7637 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7638 /* Optimize reg-reg moves away */
7640 * Can't optimize other opcodes, since sp[0] might point to
7641 * the last ins of a decomposed opcode.
7643 sp [0]->dreg = (cfg)->locals [n]->dreg;
7645 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7650 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for LDLOCA: when the address is taken only to be consumed by an
 * immediately following INITOBJ in the same basic block, initialize the local
 * directly and let the caller skip both instructions, so the local's address
 * is never materialized.
 */
7653 static inline unsigned char *
7654 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Local index follows the ldloca opcode (16-bit operand form). */
7664 local = read16 (ip + 2);
/* ldloca followed by "prefix1 initobj <token>" within the same bblock. */
7668 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7669 /* From the INITOBJ case */
7670 token = read32 (ip + 2);
7671 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7672 CHECK_TYPELOAD (klass);
7673 type = mini_get_underlying_type (&klass->byval_arg);
/* Zero-initialize the local in place instead of emitting ldloca+initobj. */
7674 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual or interface call in llvm-only mode, where vtable
 * and IMT slots hold function descriptors (address + argument pairs) rather
 * than raw code addresses/trampolines.  Fast paths, in order:
 *   1. plain (non-interface, non-generic, non-gsharedvt) virtual calls;
 *   2. simple interface calls through an IMT thunk;
 *   3. generic-virtual / variant-interface calls with a slow-path fallback;
 * everything else (gsharedvt) resolves the target through a runtime icall.
 * NOTE(review): interior lines are elided in this view; visible tokens are
 * preserved byte-identical.
 */
7682 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7684 MonoInst *icall_args [16];
7685 MonoInst *call_target, *ins, *vtable_ins;
7686 int arg_reg, this_reg, vtable_reg;
7687 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7688 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7689 gboolean variant_iface = FALSE;
7694 * In llvm-only mode, vtables contain function descriptors instead of
7695 * method addresses/trampolines.
/* sp [0] is the receiver ('this'); fault early on a null receiver. */
7697 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces dispatch through IMT slots, classes through vtable indices. */
7700 slot = mono_method_get_imt_slot (cmethod);
7702 slot = mono_method_get_vtable_index (cmethod);
7704 this_reg = sp [0]->dreg;
7706 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7707 variant_iface = TRUE;
/* Case 1: a plain virtual call through the vtable. */
7709 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7711 * The simplest case, a normal virtual call.
7713 int slot_reg = alloc_preg (cfg);
7714 int addr_reg = alloc_preg (cfg);
7715 int arg_reg = alloc_preg (cfg);
7716 MonoBasicBlock *non_null_bb;
7718 vtable_reg = alloc_preg (cfg);
7719 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7720 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7722 /* Load the vtable slot, which contains a function descriptor. */
7723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7725 NEW_BBLOCK (cfg, non_null_bb);
/* Slot already initialized (non-null) is the expected case. */
7727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7728 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Null slot: call out to initialize it lazily. */
7732 // FIXME: Make the wrapper use the preserveall cconv
7733 // FIXME: Use one icall per slot for small slot numbers ?
7734 icall_args [0] = vtable_ins;
7735 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7736 /* Make the icall return the vtable slot value to save some code space */
7737 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7738 ins->dreg = slot_reg;
7739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7742 MONO_START_BB (cfg, non_null_bb);
7743 /* Load the address + arg from the vtable slot */
7744 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7747 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a non-variant, non-generic interface call through an IMT thunk. */
7750 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7752 * A simple interface call
7754 * We make a call through an imt slot to obtain the function descriptor we need to call.
7755 * The imt slot contains a function descriptor for a runtime function + arg.
7757 int slot_reg = alloc_preg (cfg);
7758 int addr_reg = alloc_preg (cfg);
7759 int arg_reg = alloc_preg (cfg);
7760 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7762 vtable_reg = alloc_preg (cfg);
7763 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7764 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7767 * The slot is already initialized when the vtable is created so there is no need
7771 /* Load the imt slot, which contains a function descriptor. */
7772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7774 /* Load the address + arg of the imt thunk from the imt slot */
7775 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7776 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7778 * IMT thunks in llvm-only mode are C functions which take an info argument
7779 * plus the imt method and return the ftndesc to call.
7781 icall_args [0] = thunk_arg_ins;
7782 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7783 cmethod, MONO_RGCTX_INFO_METHOD);
7784 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7786 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual or variant interface call; the thunk may miss, so
 * keep a slow path that resolves the target through a runtime icall. */
7789 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7791 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7792 * dynamically extended as more instantiations are discovered.
7793 * This handles generic virtual methods both on classes and interfaces.
7795 int slot_reg = alloc_preg (cfg);
7796 int addr_reg = alloc_preg (cfg);
7797 int arg_reg = alloc_preg (cfg);
7798 int ftndesc_reg = alloc_preg (cfg);
7799 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7800 MonoBasicBlock *slowpath_bb, *end_bb;
7802 NEW_BBLOCK (cfg, slowpath_bb);
7803 NEW_BBLOCK (cfg, end_bb);
7805 vtable_reg = alloc_preg (cfg);
7806 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface slots are negative (IMT); class slots are positive vtable entries. */
7808 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7810 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7812 /* Load the slot, which contains a function descriptor. */
7813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7815 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7816 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7821 /* Same as with iface calls */
7822 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7823 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7824 icall_args [0] = thunk_arg_ins;
7825 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7826 cmethod, MONO_RGCTX_INFO_METHOD);
7827 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7828 ftndesc_ins->dreg = ftndesc_reg;
7830 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7831 * they don't know about yet. Fall back to the slowpath in that case.
7833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7834 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target descriptor with a runtime icall. */
7839 MONO_START_BB (cfg, slowpath_bb);
7840 icall_args [0] = vtable_ins;
7841 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7842 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7843 cmethod, MONO_RGCTX_INFO_METHOD);
7845 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7847 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7848 ftndesc_ins->dreg = ftndesc_reg;
7849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7852 MONO_START_BB (cfg, end_bb);
7853 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7857 * Non-optimized cases
/* Fallback (gsharedvt): resolve address + extra arg through an icall; the
 * extra arg is returned through the by-ref fourth icall argument. */
7859 icall_args [0] = sp [0];
7860 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7862 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7863 cmethod, MONO_RGCTX_INFO_METHOD);
7865 arg_reg = alloc_preg (cfg);
7866 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7867 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7869 g_assert (is_gsharedvt);
7871 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7873 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7876 * Pass the extra argument even if the callee doesn't receive it, most
7877 * calling conventions allow this.
7879 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain.  NOTE(review): the loop head and return statements are
 * elided in this view.
 */
7883 is_exception_class (MonoClass *klass)
7886 if (klass == mono_defaults.exception_class)
7888 klass = klass->parent;
7894 * is_jit_optimizer_disabled:
7896 * Determine whenever M's assembly has a DebuggableAttribute with the
7897 * IsJITOptimizerDisabled flag set.
7900 is_jit_optimizer_disabled (MonoMethod *m)
7903 MonoAssembly *ass = m->klass->image->assembly;
7904 MonoCustomAttrInfo* attrs;
7907 gboolean val = FALSE;
/* Fast path: the per-assembly result is computed once and cached. */
7910 if (ass->jit_optimizer_disabled_inited)
7911 return ass->jit_optimizer_disabled;
7913 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class not available: record FALSE and return.
 * The barrier orders the value store before the 'inited' flag store. */
7917 ass->jit_optimizer_disabled = FALSE;
7918 mono_memory_barrier ();
7919 ass->jit_optimizer_disabled_inited = TRUE;
7923 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7924 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7926 for (i = 0; i < attrs->num_attrs; ++i) {
7927 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7929 MonoMethodSignature *sig;
/* Only DebuggableAttribute instances are of interest. */
7931 if (!attr->ctor || attr->ctor->klass != klass)
7933 /* Decode the attribute. See reflection.c */
7934 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog (ECMA-335 II.23.3). */
7935 g_assert (read16 (p) == 0x0001);
7938 // FIXME: Support named parameters
7939 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7940 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7942 /* Two boolean arguments */
7946 mono_custom_attrs_free (attrs);
/* Publish the computed value before setting the 'inited' flag. */
7949 ass->jit_optimizer_disabled = val;
7950 mono_memory_barrier ();
7951 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * emitted as a tail call.  Starts from the architecture's verdict and then
 * vetoes cases where the callee could observe the caller's (dead) stack
 * frame or where the calling setup is incompatible.
 */
7957 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7959 gboolean supported_tail_call;
7962 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7964 for (i = 0; i < fsig->param_count; ++i) {
7965 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7966 /* These can point to the current method's stack */
7967 supported_tail_call = FALSE;
7969 if (fsig->hasthis && cmethod->klass->valuetype)
7970 /* this might point to the current method's stack */
7971 supported_tail_call = FALSE;
/* P/Invokes, LMF-saving callers and most wrappers need the caller frame. */
7972 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7973 supported_tail_call = FALSE;
7974 if (cfg->method->save_lmf)
7975 supported_tail_call = FALSE;
7976 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7977 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible. */
7978 if (call_opcode != CEE_CALL)
7979 supported_tail_call = FALSE;
7981 /* Debugging support */
/* mono_debug_count () lets tail calls be disabled selectively for bisecting. */
7983 if (supported_tail_call) {
7984 if (!mono_debug_count ())
7985 supported_tail_call = FALSE;
7989 return supported_tail_call;
7995 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: picks between ctor
 * intrinsics, inlining, gsharedvt/indirect calls and a plain direct call.
 * On inline success *INLINE_COSTS is adjusted.  NOTE(review): interior lines
 * are elided in this view; visible tokens are preserved byte-identical.
 */
7998 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7999 MonoInst **sp, guint8 *ip, int *inline_costs)
8001 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an rgctx/vtable argument. */
8003 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8004 mono_method_is_generic_sharable (cmethod, TRUE)) {
8005 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8006 mono_class_vtable (cfg->domain, cmethod->klass);
8007 CHECK_TYPELOAD (cmethod->klass);
8009 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8010 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8013 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8014 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8016 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8018 CHECK_TYPELOAD (cmethod->klass);
8019 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8024 /* Avoid virtual calls to ctors if possible */
8025 if (mono_class_is_marshalbyref (cmethod->klass))
8026 callvirt_this_arg = sp [0];
/* 1. Try a ctor intrinsic. */
8028 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8029 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8030 CHECK_CFG_EXCEPTION;
/* 2. Try inlining the ctor body (not for exception subclasses). */
8031 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8032 mono_method_check_inlining (cfg, cmethod) &&
8033 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8036 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8037 cfg->real_offset += 5;
8039 *inline_costs += costs - 5;
8041 INLINE_FAILURE ("inline failure");
8042 // FIXME-VT: Clean this up
8043 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8044 GSHAREDVT_FAILURE(*ip);
8045 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3. gsharedvt signature: call through an out trampoline. */
8047 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8050 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8052 if (cfg->llvm_only) {
8053 // FIXME: Avoid initializing vtable_arg
8054 emit_llvmonly_calli (cfg, fsig, sp, addr);
8056 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4. Context-used call that cannot be patched: go indirect via rgctx. */
8058 } else if (context_used &&
8059 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8060 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8061 MonoInst *cmethod_addr;
8063 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8065 if (cfg->llvm_only) {
8066 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8067 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8068 emit_llvmonly_calli (cfg, fsig, sp, addr);
8070 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8071 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8073 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5. Fallback: plain direct ctor call. */
8076 INLINE_FAILURE ("ctor call");
8077 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8078 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that delivers VAL as the return value of cfg->method.
 * Valuetype (STOBJ-style) returns are stored either into the return var or
 * through the hidden vret address argument; on soft-float targets an R4
 * return is converted through an icall first.
 */
8085 emit_setret (MonoCompile *cfg, MonoInst *val)
8087 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: the value does not fit in registers. */
8090 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8093 if (!cfg->vret_addr) {
8094 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Otherwise store through the caller-provided return-value address. */
8096 EMIT_NEW_RETLOADA (cfg, ret_addr);
8098 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8099 ins->klass = mono_class_from_mono_type (ret_type);
8102 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value via an icall before setting the return. */
8103 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8104 MonoInst *iargs [1];
8108 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8109 mono_arch_emit_setret (cfg, cfg->method, conv);
8111 mono_arch_emit_setret (cfg, cfg->method, val);
8114 mono_arch_emit_setret (cfg, cfg->method, val);
8120 * mono_method_to_ir:
8122 * Translate the .net IL into linear IR.
8125 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8126 MonoInst *return_var, MonoInst **inline_args,
8127 guint inline_offset, gboolean is_virtual_call)
8130 MonoInst *ins, **sp, **stack_start;
8131 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8132 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8133 MonoMethod *cmethod, *method_definition;
8134 MonoInst **arg_array;
8135 MonoMethodHeader *header;
8137 guint32 token, ins_flag;
8139 MonoClass *constrained_class = NULL;
8140 unsigned char *ip, *end, *target, *err_pos;
8141 MonoMethodSignature *sig;
8142 MonoGenericContext *generic_context = NULL;
8143 MonoGenericContainer *generic_container = NULL;
8144 MonoType **param_types;
8145 int i, n, start_new_bblock, dreg;
8146 int num_calls = 0, inline_costs = 0;
8147 int breakpoint_id = 0;
8149 GSList *class_inits = NULL;
8150 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8152 gboolean init_locals, seq_points, skip_dead_blocks;
8153 gboolean sym_seq_points = FALSE;
8154 MonoDebugMethodInfo *minfo;
8155 MonoBitSet *seq_point_locs = NULL;
8156 MonoBitSet *seq_point_set_locs = NULL;
8158 cfg->disable_inline = is_jit_optimizer_disabled (method);
8160 /* serialization and xdomain stuff may need access to private fields and methods */
8161 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8162 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8163 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8164 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8165 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8166 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8168 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8169 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8170 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8171 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8172 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8174 image = method->klass->image;
8175 header = mono_method_get_header (method);
8177 if (mono_loader_get_last_error ()) {
8178 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8179 mono_error_set_from_loader_error (&cfg->error);
8181 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name));
8183 goto exception_exit;
8185 generic_container = mono_method_get_generic_container (method);
8186 sig = mono_method_signature (method);
8187 num_args = sig->hasthis + sig->param_count;
8188 ip = (unsigned char*)header->code;
8189 cfg->cil_start = ip;
8190 end = ip + header->code_size;
8191 cfg->stat_cil_code_size += header->code_size;
8193 seq_points = cfg->gen_seq_points && cfg->method == method;
8195 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8196 /* We could hit a seq point before attaching to the JIT (#8338) */
8200 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8201 minfo = mono_debug_lookup_method (method);
8203 MonoSymSeqPoint *sps;
8204 int i, n_il_offsets;
8206 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8207 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8208 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8209 sym_seq_points = TRUE;
8210 for (i = 0; i < n_il_offsets; ++i) {
8211 if (sps [i].il_offset < header->code_size)
8212 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8215 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8216 /* Methods without line number info like auto-generated property accessors */
8217 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8218 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8219 sym_seq_points = TRUE;
8224 * Methods without init_locals set could cause asserts in various passes
8225 * (#497220). To work around this, we emit dummy initialization opcodes
8226 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8227 * on some platforms.
8229 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8230 init_locals = header->init_locals;
8234 method_definition = method;
8235 while (method_definition->is_inflated) {
8236 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8237 method_definition = imethod->declaring;
8240 /* SkipVerification is not allowed if core-clr is enabled */
8241 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8243 dont_verify_stloc = TRUE;
8246 if (sig->is_inflated)
8247 generic_context = mono_method_get_context (method);
8248 else if (generic_container)
8249 generic_context = &generic_container->context;
8250 cfg->generic_context = generic_context;
8253 g_assert (!sig->has_type_parameters);
8255 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8256 g_assert (method->is_inflated);
8257 g_assert (mono_method_get_context (method)->method_inst);
8259 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8260 g_assert (sig->generic_param_count);
8262 if (cfg->method == method) {
8263 cfg->real_offset = 0;
8265 cfg->real_offset = inline_offset;
8268 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8269 cfg->cil_offset_to_bb_len = header->code_size;
8271 cfg->current_method = method;
8273 if (cfg->verbose_level > 2)
8274 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8276 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8278 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8279 for (n = 0; n < sig->param_count; ++n)
8280 param_types [n + sig->hasthis] = sig->params [n];
8281 cfg->arg_types = param_types;
8283 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8284 if (cfg->method == method) {
8286 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8287 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8290 NEW_BBLOCK (cfg, start_bblock);
8291 cfg->bb_entry = start_bblock;
8292 start_bblock->cil_code = NULL;
8293 start_bblock->cil_length = 0;
8296 NEW_BBLOCK (cfg, end_bblock);
8297 cfg->bb_exit = end_bblock;
8298 end_bblock->cil_code = NULL;
8299 end_bblock->cil_length = 0;
8300 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8301 g_assert (cfg->num_bblocks == 2);
8303 arg_array = cfg->args;
8305 if (header->num_clauses) {
8306 cfg->spvars = g_hash_table_new (NULL, NULL);
8307 cfg->exvars = g_hash_table_new (NULL, NULL);
8309 /* handle exception clauses */
8310 for (i = 0; i < header->num_clauses; ++i) {
8311 MonoBasicBlock *try_bb;
8312 MonoExceptionClause *clause = &header->clauses [i];
8313 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8315 try_bb->real_offset = clause->try_offset;
8316 try_bb->try_start = TRUE;
8317 try_bb->region = ((i + 1) << 8) | clause->flags;
8318 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8319 tblock->real_offset = clause->handler_offset;
8320 tblock->flags |= BB_EXCEPTION_HANDLER;
8323 * Linking the try block with the EH block hinders inlining as we won't be able to
8324 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8326 if (COMPILE_LLVM (cfg))
8327 link_bblock (cfg, try_bb, tblock);
8329 if (*(ip + clause->handler_offset) == CEE_POP)
8330 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8332 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8333 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8334 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8335 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8336 MONO_ADD_INS (tblock, ins);
8338 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8339 /* finally clauses already have a seq point */
8340 /* seq points for filter clauses are emitted below */
8341 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8342 MONO_ADD_INS (tblock, ins);
8345 /* todo: is a fault block unsafe to optimize? */
8346 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8347 tblock->flags |= BB_EXCEPTION_UNSAFE;
8350 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8352 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8354 /* catch and filter blocks get the exception object on the stack */
8355 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8356 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8358 /* mostly like handle_stack_args (), but just sets the input args */
8359 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8360 tblock->in_scount = 1;
8361 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8362 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8366 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8367 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8368 if (!cfg->compile_llvm) {
8369 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8370 ins->dreg = tblock->in_stack [0]->dreg;
8371 MONO_ADD_INS (tblock, ins);
8374 MonoInst *dummy_use;
8377 * Add a dummy use for the exvar so its liveness info will be
8380 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8383 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8384 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8385 MONO_ADD_INS (tblock, ins);
8388 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8389 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8390 tblock->flags |= BB_EXCEPTION_HANDLER;
8391 tblock->real_offset = clause->data.filter_offset;
8392 tblock->in_scount = 1;
8393 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8394 /* The filter block shares the exvar with the handler block */
8395 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8396 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8397 MONO_ADD_INS (tblock, ins);
8401 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8402 clause->data.catch_class &&
8404 mono_class_check_context_used (clause->data.catch_class)) {
8406 * In shared generic code with catch
8407 * clauses containing type variables
8408 * the exception handling code has to
8409 * be able to get to the rgctx.
8410 * Therefore we have to make sure that
8411 * the vtable/mrgctx argument (for
8412 * static or generic methods) or the
8413 * "this" argument (for non-static
8414 * methods) are live.
8416 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8417 mini_method_get_context (method)->method_inst ||
8418 method->klass->valuetype) {
8419 mono_get_vtable_var (cfg);
8421 MonoInst *dummy_use;
8423 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8428 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8429 cfg->cbb = start_bblock;
8430 cfg->args = arg_array;
8431 mono_save_args (cfg, sig, inline_args);
8434 /* FIRST CODE BLOCK */
8435 NEW_BBLOCK (cfg, tblock);
8436 tblock->cil_code = ip;
8440 ADD_BBLOCK (cfg, tblock);
8442 if (cfg->method == method) {
8443 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8444 if (breakpoint_id) {
8445 MONO_INST_NEW (cfg, ins, OP_BREAK);
8446 MONO_ADD_INS (cfg->cbb, ins);
8450 /* we use a separate basic block for the initialization code */
8451 NEW_BBLOCK (cfg, init_localsbb);
8452 cfg->bb_init = init_localsbb;
8453 init_localsbb->real_offset = cfg->real_offset;
8454 start_bblock->next_bb = init_localsbb;
8455 init_localsbb->next_bb = cfg->cbb;
8456 link_bblock (cfg, start_bblock, init_localsbb);
8457 link_bblock (cfg, init_localsbb, cfg->cbb);
8459 cfg->cbb = init_localsbb;
8461 if (cfg->gsharedvt && cfg->method == method) {
8462 MonoGSharedVtMethodInfo *info;
8463 MonoInst *var, *locals_var;
8466 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8467 info->method = cfg->method;
8468 info->count_entries = 16;
8469 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8470 cfg->gsharedvt_info = info;
8472 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8473 /* prevent it from being register allocated */
8474 //var->flags |= MONO_INST_VOLATILE;
8475 cfg->gsharedvt_info_var = var;
8477 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8478 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8480 /* Allocate locals */
8481 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8482 /* prevent it from being register allocated */
8483 //locals_var->flags |= MONO_INST_VOLATILE;
8484 cfg->gsharedvt_locals_var = locals_var;
8486 dreg = alloc_ireg (cfg);
8487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8489 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8490 ins->dreg = locals_var->dreg;
8492 MONO_ADD_INS (cfg->cbb, ins);
8493 cfg->gsharedvt_locals_var_ins = ins;
8495 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8498 ins->flags |= MONO_INST_INIT;
8502 if (mono_security_core_clr_enabled ()) {
8503 /* check if this is native code, e.g. an icall or a p/invoke */
8504 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8505 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8507 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8508 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8510 /* if this ia a native call then it can only be JITted from platform code */
8511 if ((icall || pinvk) && method->klass && method->klass->image) {
8512 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8513 MonoException *ex = icall ? mono_get_exception_security () :
8514 mono_get_exception_method_access ();
8515 emit_throw_exception (cfg, ex);
8522 CHECK_CFG_EXCEPTION;
8524 if (header->code_size == 0)
8527 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8532 if (cfg->method == method)
8533 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8535 for (n = 0; n < header->num_locals; ++n) {
8536 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8541 /* We force the vtable variable here for all shared methods
8542 for the possibility that they might show up in a stack
8543 trace where their exact instantiation is needed. */
8544 if (cfg->gshared && method == cfg->method) {
8545 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8546 mini_method_get_context (method)->method_inst ||
8547 method->klass->valuetype) {
8548 mono_get_vtable_var (cfg);
8550 /* FIXME: Is there a better way to do this?
8551 We need the variable live for the duration
8552 of the whole method. */
8553 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8557 /* add a check for this != NULL to inlined methods */
8558 if (is_virtual_call) {
8561 NEW_ARGLOAD (cfg, arg_ins, 0);
8562 MONO_ADD_INS (cfg->cbb, arg_ins);
8563 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8566 skip_dead_blocks = !dont_verify;
8567 if (skip_dead_blocks) {
8568 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8573 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8574 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8577 start_new_bblock = 0;
8579 if (cfg->method == method)
8580 cfg->real_offset = ip - header->code;
8582 cfg->real_offset = inline_offset;
8587 if (start_new_bblock) {
8588 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8589 if (start_new_bblock == 2) {
8590 g_assert (ip == tblock->cil_code);
8592 GET_BBLOCK (cfg, tblock, ip);
8594 cfg->cbb->next_bb = tblock;
8596 start_new_bblock = 0;
8597 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8598 if (cfg->verbose_level > 3)
8599 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8600 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8604 g_slist_free (class_inits);
8607 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8608 link_bblock (cfg, cfg->cbb, tblock);
8609 if (sp != stack_start) {
8610 handle_stack_args (cfg, stack_start, sp - stack_start);
8612 CHECK_UNVERIFIABLE (cfg);
8614 cfg->cbb->next_bb = tblock;
8616 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8617 if (cfg->verbose_level > 3)
8618 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8619 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8622 g_slist_free (class_inits);
8627 if (skip_dead_blocks) {
8628 int ip_offset = ip - header->code;
8630 if (ip_offset == bb->end)
8634 int op_size = mono_opcode_size (ip, end);
8635 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8637 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8639 if (ip_offset + op_size == bb->end) {
8640 MONO_INST_NEW (cfg, ins, OP_NOP);
8641 MONO_ADD_INS (cfg->cbb, ins);
8642 start_new_bblock = 1;
8650 * Sequence points are points where the debugger can place a breakpoint.
8651 * Currently, we generate these automatically at points where the IL
8654 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8656 * Make methods interruptable at the beginning, and at the targets of
8657 * backward branches.
8658 * Also, do this at the start of every bblock in methods with clauses too,
8659 * to be able to handle instructions with inprecise control flow like
8661 * Backward branches are handled at the end of method-to-ir ().
8663 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8664 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8666 /* Avoid sequence points on empty IL like .volatile */
8667 // FIXME: Enable this
8668 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8669 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8670 if ((sp != stack_start) && !sym_seq_point)
8671 ins->flags |= MONO_INST_NONEMPTY_STACK;
8672 MONO_ADD_INS (cfg->cbb, ins);
8675 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8678 cfg->cbb->real_offset = cfg->real_offset;
8680 if ((cfg->method == method) && cfg->coverage_info) {
8681 guint32 cil_offset = ip - header->code;
8682 cfg->coverage_info->data [cil_offset].cil_code = ip;
8684 /* TODO: Use an increment here */
8685 #if defined(TARGET_X86)
8686 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8687 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8689 MONO_ADD_INS (cfg->cbb, ins);
8691 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8692 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8696 if (cfg->verbose_level > 3)
8697 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8701 if (seq_points && !sym_seq_points && sp != stack_start) {
8703 * The C# compiler uses these nops to notify the JIT that it should
8704 * insert seq points.
8706 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8707 MONO_ADD_INS (cfg->cbb, ins);
8709 if (cfg->keep_cil_nops)
8710 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8712 MONO_INST_NEW (cfg, ins, OP_NOP);
8714 MONO_ADD_INS (cfg->cbb, ins);
8717 if (should_insert_brekpoint (cfg->method)) {
8718 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8720 MONO_INST_NEW (cfg, ins, OP_NOP);
8723 MONO_ADD_INS (cfg->cbb, ins);
8729 CHECK_STACK_OVF (1);
8730 n = (*ip)-CEE_LDARG_0;
8732 EMIT_NEW_ARGLOAD (cfg, ins, n);
8740 CHECK_STACK_OVF (1);
8741 n = (*ip)-CEE_LDLOC_0;
8743 EMIT_NEW_LOCLOAD (cfg, ins, n);
8752 n = (*ip)-CEE_STLOC_0;
8755 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8757 emit_stloc_ir (cfg, sp, header, n);
8764 CHECK_STACK_OVF (1);
8767 EMIT_NEW_ARGLOAD (cfg, ins, n);
8773 CHECK_STACK_OVF (1);
8776 NEW_ARGLOADA (cfg, ins, n);
8777 MONO_ADD_INS (cfg->cbb, ins);
8787 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8789 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8794 CHECK_STACK_OVF (1);
8797 EMIT_NEW_LOCLOAD (cfg, ins, n);
8801 case CEE_LDLOCA_S: {
8802 unsigned char *tmp_ip;
8804 CHECK_STACK_OVF (1);
8805 CHECK_LOCAL (ip [1]);
8807 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8813 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8822 CHECK_LOCAL (ip [1]);
8823 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8825 emit_stloc_ir (cfg, sp, header, ip [1]);
8830 CHECK_STACK_OVF (1);
8831 EMIT_NEW_PCONST (cfg, ins, NULL);
8832 ins->type = STACK_OBJ;
8837 CHECK_STACK_OVF (1);
8838 EMIT_NEW_ICONST (cfg, ins, -1);
8851 CHECK_STACK_OVF (1);
8852 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8858 CHECK_STACK_OVF (1);
8860 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8866 CHECK_STACK_OVF (1);
8867 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8873 CHECK_STACK_OVF (1);
8874 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8875 ins->type = STACK_I8;
8876 ins->dreg = alloc_dreg (cfg, STACK_I8);
8878 ins->inst_l = (gint64)read64 (ip);
8879 MONO_ADD_INS (cfg->cbb, ins);
8885 gboolean use_aotconst = FALSE;
8887 #ifdef TARGET_POWERPC
8888 /* FIXME: Clean this up */
8889 if (cfg->compile_aot)
8890 use_aotconst = TRUE;
8893 /* FIXME: we should really allocate this only late in the compilation process */
8894 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8896 CHECK_STACK_OVF (1);
8902 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8904 dreg = alloc_freg (cfg);
8905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8906 ins->type = cfg->r4_stack_type;
8908 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8909 ins->type = cfg->r4_stack_type;
8910 ins->dreg = alloc_dreg (cfg, STACK_R8);
8912 MONO_ADD_INS (cfg->cbb, ins);
8922 gboolean use_aotconst = FALSE;
8924 #ifdef TARGET_POWERPC
8925 /* FIXME: Clean this up */
8926 if (cfg->compile_aot)
8927 use_aotconst = TRUE;
8930 /* FIXME: we should really allocate this only late in the compilation process */
8931 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8933 CHECK_STACK_OVF (1);
8939 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8941 dreg = alloc_freg (cfg);
8942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8943 ins->type = STACK_R8;
8945 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8946 ins->type = STACK_R8;
8947 ins->dreg = alloc_dreg (cfg, STACK_R8);
8949 MONO_ADD_INS (cfg->cbb, ins);
8958 MonoInst *temp, *store;
8960 CHECK_STACK_OVF (1);
8964 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8965 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8967 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8970 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8983 if (sp [0]->type == STACK_R8)
8984 /* we need to pop the value from the x86 FP stack */
8985 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8990 MonoMethodSignature *fsig;
8993 INLINE_FAILURE ("jmp");
8994 GSHAREDVT_FAILURE (*ip);
8997 if (stack_start != sp)
8999 token = read32 (ip + 1);
9000 /* FIXME: check the signature matches */
9001 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9004 if (cfg->gshared && mono_method_check_context_used (cmethod))
9005 GENERIC_SHARING_FAILURE (CEE_JMP);
9007 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9009 fsig = mono_method_signature (cmethod);
9010 n = fsig->param_count + fsig->hasthis;
9011 if (cfg->llvm_only) {
9014 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9015 for (i = 0; i < n; ++i)
9016 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9017 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9019 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9020 * have to emit a normal return since llvm expects it.
9023 emit_setret (cfg, ins);
9024 MONO_INST_NEW (cfg, ins, OP_BR);
9025 ins->inst_target_bb = end_bblock;
9026 MONO_ADD_INS (cfg->cbb, ins);
9027 link_bblock (cfg, cfg->cbb, end_bblock);
9030 } else if (cfg->backend->have_op_tail_call) {
9031 /* Handle tail calls similarly to calls */
9034 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9035 call->method = cmethod;
9036 call->tail_call = TRUE;
9037 call->signature = mono_method_signature (cmethod);
9038 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9039 call->inst.inst_p0 = cmethod;
9040 for (i = 0; i < n; ++i)
9041 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9043 mono_arch_emit_call (cfg, call);
9044 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9045 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9047 for (i = 0; i < num_args; ++i)
9048 /* Prevent arguments from being optimized away */
9049 arg_array [i]->flags |= MONO_INST_VOLATILE;
9051 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9052 ins = (MonoInst*)call;
9053 ins->inst_p0 = cmethod;
9054 MONO_ADD_INS (cfg->cbb, ins);
9058 start_new_bblock = 1;
9063 MonoMethodSignature *fsig;
9066 token = read32 (ip + 1);
9070 //GSHAREDVT_FAILURE (*ip);
9075 fsig = mini_get_signature (method, token, generic_context);
9077 if (method->dynamic && fsig->pinvoke) {
9081 * This is a call through a function pointer using a pinvoke
9082 * signature. Have to create a wrapper and call that instead.
9083 * FIXME: This is very slow, need to create a wrapper at JIT time
9084 * instead based on the signature.
9086 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9087 EMIT_NEW_PCONST (cfg, args [1], fsig);
9089 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9092 n = fsig->param_count + fsig->hasthis;
9096 //g_assert (!virtual_ || fsig->hasthis);
9100 inline_costs += 10 * num_calls++;
9103 * Making generic calls out of gsharedvt methods.
9104 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9105 * patching gshared method addresses into a gsharedvt method.
9107 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9109 * We pass the address to the gsharedvt trampoline in the rgctx reg
9111 MonoInst *callee = addr;
9113 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9115 GSHAREDVT_FAILURE (*ip);
9119 GSHAREDVT_FAILURE (*ip);
9121 addr = emit_get_rgctx_sig (cfg, context_used,
9122 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9123 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9127 /* Prevent inlining of methods with indirect calls */
9128 INLINE_FAILURE ("indirect call");
9130 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9131 MonoJumpInfoType info_type;
9135 * Instead of emitting an indirect call, emit a direct call
9136 * with the contents of the aotconst as the patch info.
9138 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9139 info_type = (MonoJumpInfoType)addr->inst_c1;
9140 info_data = addr->inst_p0;
9142 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9143 info_data = addr->inst_right->inst_left;
9146 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9147 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9152 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9156 /* End of call, INS should contain the result of the call, if any */
9158 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9160 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9163 CHECK_CFG_EXCEPTION;
9167 constrained_class = NULL;
9171 case CEE_CALLVIRT: {
9172 MonoInst *addr = NULL;
9173 MonoMethodSignature *fsig = NULL;
9175 int virtual_ = *ip == CEE_CALLVIRT;
9176 gboolean pass_imt_from_rgctx = FALSE;
9177 MonoInst *imt_arg = NULL;
9178 MonoInst *keep_this_alive = NULL;
9179 gboolean pass_vtable = FALSE;
9180 gboolean pass_mrgctx = FALSE;
9181 MonoInst *vtable_arg = NULL;
9182 gboolean check_this = FALSE;
9183 gboolean supported_tail_call = FALSE;
9184 gboolean tail_call = FALSE;
9185 gboolean need_seq_point = FALSE;
9186 guint32 call_opcode = *ip;
9187 gboolean emit_widen = TRUE;
9188 gboolean push_res = TRUE;
9189 gboolean skip_ret = FALSE;
9190 gboolean delegate_invoke = FALSE;
9191 gboolean direct_icall = FALSE;
9192 gboolean constrained_partial_call = FALSE;
9193 MonoMethod *cil_method;
9196 token = read32 (ip + 1);
9200 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9203 cil_method = cmethod;
9205 if (constrained_class) {
9206 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9207 if (!mini_is_gsharedvt_klass (constrained_class)) {
9208 g_assert (!cmethod->klass->valuetype);
9209 if (!mini_type_is_reference (&constrained_class->byval_arg))
9210 constrained_partial_call = TRUE;
9214 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9215 if (cfg->verbose_level > 2)
9216 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9217 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9218 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9220 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9224 if (cfg->verbose_level > 2)
9225 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9227 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9229 * This is needed since get_method_constrained can't find
9230 * the method in klass representing a type var.
9231 * The type var is guaranteed to be a reference type in this
9234 if (!mini_is_gsharedvt_klass (constrained_class))
9235 g_assert (!cmethod->klass->valuetype);
9237 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9243 if (!cmethod || mono_loader_get_last_error ()) {
9244 if (mono_loader_get_last_error ()) {
9245 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9246 mono_error_set_from_loader_error (&cfg->error);
9252 if (!dont_verify && !cfg->skip_visibility) {
9253 MonoMethod *target_method = cil_method;
9254 if (method->is_inflated) {
9255 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9258 if (!mono_method_can_access_method (method_definition, target_method) &&
9259 !mono_method_can_access_method (method, cil_method))
9260 METHOD_ACCESS_FAILURE (method, cil_method);
9263 if (mono_security_core_clr_enabled ())
9264 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9266 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9267 /* MS.NET seems to silently convert this to a callvirt */
9272 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9273 * converts to a callvirt.
9275 * tests/bug-515884.il is an example of this behavior
9277 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9278 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9279 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9283 if (!cmethod->klass->inited)
9284 if (!mono_class_init (cmethod->klass))
9285 TYPE_LOAD_ERROR (cmethod->klass);
9287 fsig = mono_method_signature (cmethod);
9290 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9291 mini_class_is_system_array (cmethod->klass)) {
9292 array_rank = cmethod->klass->rank;
9293 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9294 direct_icall = TRUE;
9295 } else if (fsig->pinvoke) {
9296 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9297 fsig = mono_method_signature (wrapper);
9298 } else if (constrained_class) {
9300 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9304 if (cfg->llvm_only && !cfg->method->wrapper_type)
9305 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9307 /* See code below */
9308 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9309 MonoBasicBlock *tbb;
9311 GET_BBLOCK (cfg, tbb, ip + 5);
9312 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9314 * We want to extend the try block to cover the call, but we can't do it if the
9315 * call is made directly since its followed by an exception check.
9317 direct_icall = FALSE;
9321 mono_save_token_info (cfg, image, token, cil_method);
9323 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9324 need_seq_point = TRUE;
9326 /* Don't support calls made using type arguments for now */
9328 if (cfg->gsharedvt) {
9329 if (mini_is_gsharedvt_signature (fsig))
9330 GSHAREDVT_FAILURE (*ip);
9334 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9335 g_assert_not_reached ();
9337 n = fsig->param_count + fsig->hasthis;
9339 if (!cfg->gshared && cmethod->klass->generic_container)
9343 g_assert (!mono_method_check_context_used (cmethod));
9347 //g_assert (!virtual_ || fsig->hasthis);
9352 * We have the `constrained.' prefix opcode.
9354 if (constrained_class) {
9355 if (mini_is_gsharedvt_klass (constrained_class)) {
9356 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9357 /* The 'Own method' case below */
9358 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9359 /* 'The type parameter is instantiated as a reference type' case below. */
9361 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9362 CHECK_CFG_EXCEPTION;
9368 if (constrained_partial_call) {
9369 gboolean need_box = TRUE;
9372 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9373 * called method is not known at compile time either. The called method could end up being
9374 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9375 * to box the receiver.
9376 * A simple solution would be to box always and make a normal virtual call, but that would
9377 * be bad performance wise.
9379 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9381 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9386 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9387 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9389 ins->klass = constrained_class;
9390 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9391 CHECK_CFG_EXCEPTION;
9392 } else if (need_box) {
9394 MonoBasicBlock *is_ref_bb, *end_bb;
9395 MonoInst *nonbox_call;
9398 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9400 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9401 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9403 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9405 NEW_BBLOCK (cfg, is_ref_bb);
9406 NEW_BBLOCK (cfg, end_bb);
9408 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9413 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9415 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9418 MONO_START_BB (cfg, is_ref_bb);
9419 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9420 ins->klass = constrained_class;
9421 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9422 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9426 MONO_START_BB (cfg, end_bb);
9429 nonbox_call->dreg = ins->dreg;
9432 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9433 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9434 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9437 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9439 * The type parameter is instantiated as a valuetype,
9440 * but that type doesn't override the method we're
9441 * calling, so we need to box `this'.
9443 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9444 ins->klass = constrained_class;
9445 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9446 CHECK_CFG_EXCEPTION;
9447 } else if (!constrained_class->valuetype) {
9448 int dreg = alloc_ireg_ref (cfg);
9451 * The type parameter is instantiated as a reference
9452 * type. We have a managed pointer on the stack, so
9453 * we need to dereference it here.
9455 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9456 ins->type = STACK_OBJ;
9459 if (cmethod->klass->valuetype) {
9462 /* Interface method */
9465 mono_class_setup_vtable (constrained_class);
9466 CHECK_TYPELOAD (constrained_class);
9467 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9469 TYPE_LOAD_ERROR (constrained_class);
9470 slot = mono_method_get_vtable_slot (cmethod);
9472 TYPE_LOAD_ERROR (cmethod->klass);
9473 cmethod = constrained_class->vtable [ioffset + slot];
9475 if (cmethod->klass == mono_defaults.enum_class) {
9476 /* Enum implements some interfaces, so treat this as the first case */
9477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9478 ins->klass = constrained_class;
9479 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9480 CHECK_CFG_EXCEPTION;
9485 constrained_class = NULL;
9488 if (check_call_signature (cfg, fsig, sp))
9491 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9492 delegate_invoke = TRUE;
9494 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9495 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9496 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9504 * If the callee is a shared method, then its static cctor
9505 * might not get called after the call was patched.
9507 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9508 emit_class_init (cfg, cmethod->klass);
9509 CHECK_TYPELOAD (cmethod->klass);
9512 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9515 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9517 context_used = mini_method_check_context_used (cfg, cmethod);
9519 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9520 /* Generic method interface
9521 calls are resolved via a
9522 helper function and don't
9524 if (!cmethod_context || !cmethod_context->method_inst)
9525 pass_imt_from_rgctx = TRUE;
9529 * If a shared method calls another
9530 * shared method then the caller must
9531 * have a generic sharing context
9532 * because the magic trampoline
9533 * requires it. FIXME: We shouldn't
9534 * have to force the vtable/mrgctx
9535 * variable here. Instead there
9536 * should be a flag in the cfg to
9537 * request a generic sharing context.
9540 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9541 mono_get_vtable_var (cfg);
9546 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9548 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9550 CHECK_TYPELOAD (cmethod->klass);
9551 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9556 g_assert (!vtable_arg);
9558 if (!cfg->compile_aot) {
9560 * emit_get_rgctx_method () calls mono_class_vtable () so check
9561 * for type load errors before.
9563 mono_class_setup_vtable (cmethod->klass);
9564 CHECK_TYPELOAD (cmethod->klass);
9567 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9569 /* !marshalbyref is needed to properly handle generic methods + remoting */
9570 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9571 MONO_METHOD_IS_FINAL (cmethod)) &&
9572 !mono_class_is_marshalbyref (cmethod->klass)) {
9579 if (pass_imt_from_rgctx) {
9580 g_assert (!pass_vtable);
9582 imt_arg = emit_get_rgctx_method (cfg, context_used,
9583 cmethod, MONO_RGCTX_INFO_METHOD);
9587 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9589 /* Calling virtual generic methods */
9590 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9591 !(MONO_METHOD_IS_FINAL (cmethod) &&
9592 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9593 fsig->generic_param_count &&
9594 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9596 MonoInst *this_temp, *this_arg_temp, *store;
9597 MonoInst *iargs [4];
9599 g_assert (fsig->is_inflated);
9601 /* Prevent inlining of methods that contain indirect calls */
9602 INLINE_FAILURE ("virtual generic call");
9604 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9605 GSHAREDVT_FAILURE (*ip);
9607 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9608 g_assert (!imt_arg);
9610 g_assert (cmethod->is_inflated);
9611 imt_arg = emit_get_rgctx_method (cfg, context_used,
9612 cmethod, MONO_RGCTX_INFO_METHOD);
9613 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9615 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9616 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9617 MONO_ADD_INS (cfg->cbb, store);
9619 /* FIXME: This should be a managed pointer */
9620 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9622 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9623 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9624 cmethod, MONO_RGCTX_INFO_METHOD);
9625 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9626 addr = mono_emit_jit_icall (cfg,
9627 mono_helper_compile_generic_method, iargs);
9629 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9631 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9638 * Implement a workaround for the inherent races involved in locking:
9644 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9645 * try block, the Exit () won't be executed, see:
9646 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9647 * To work around this, we extend such try blocks to include the last x bytes
9648 * of the Monitor.Enter () call.
9650 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9651 MonoBasicBlock *tbb;
9653 GET_BBLOCK (cfg, tbb, ip + 5);
9655 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9656 * from Monitor.Enter like ArgumentNullException.
9658 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9659 /* Mark this bblock as needing to be extended */
9660 tbb->extend_try_block = TRUE;
9664 /* Conversion to a JIT intrinsic */
9665 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9666 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9667 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9674 if ((cfg->opt & MONO_OPT_INLINE) &&
9675 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9676 mono_method_check_inlining (cfg, cmethod)) {
9678 gboolean always = FALSE;
9680 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9681 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9682 /* Prevent inlining of methods that call wrappers */
9683 INLINE_FAILURE ("wrapper call");
9684 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9688 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9690 cfg->real_offset += 5;
9692 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9693 /* *sp is already set by inline_method */
9698 inline_costs += costs;
9704 /* Tail recursion elimination */
9705 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9706 gboolean has_vtargs = FALSE;
9709 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9710 INLINE_FAILURE ("tail call");
9712 /* keep it simple */
9713 for (i = fsig->param_count - 1; i >= 0; i--) {
9714 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9719 for (i = 0; i < n; ++i)
9720 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9721 MONO_INST_NEW (cfg, ins, OP_BR);
9722 MONO_ADD_INS (cfg->cbb, ins);
9723 tblock = start_bblock->out_bb [0];
9724 link_bblock (cfg, cfg->cbb, tblock);
9725 ins->inst_target_bb = tblock;
9726 start_new_bblock = 1;
9728 /* skip the CEE_RET, too */
9729 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9736 inline_costs += 10 * num_calls++;
9739 * Making generic calls out of gsharedvt methods.
9740 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9741 * patching gshared method addresses into a gsharedvt method.
9743 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9744 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9745 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9746 MonoRgctxInfoType info_type;
9749 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9750 //GSHAREDVT_FAILURE (*ip);
9751 // disable for possible remoting calls
9752 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9753 GSHAREDVT_FAILURE (*ip);
9754 if (fsig->generic_param_count) {
9755 /* virtual generic call */
9756 g_assert (!imt_arg);
9757 /* Same as the virtual generic case above */
9758 imt_arg = emit_get_rgctx_method (cfg, context_used,
9759 cmethod, MONO_RGCTX_INFO_METHOD);
9760 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9762 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9763 /* This can happen when we call a fully instantiated iface method */
9764 imt_arg = emit_get_rgctx_method (cfg, context_used,
9765 cmethod, MONO_RGCTX_INFO_METHOD);
9770 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9771 keep_this_alive = sp [0];
9773 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9774 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9776 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9777 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9779 if (cfg->llvm_only) {
9780 // FIXME: Avoid initializing vtable_arg
9781 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9783 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9788 /* Generic sharing */
9791 * Use this if the callee is gsharedvt sharable too, since
9792 * at runtime we might find an instantiation so the call cannot
9793 * be patched (the 'no_patch' code path in mini-trampolines.c).
9795 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9796 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9797 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9798 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9799 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9800 INLINE_FAILURE ("gshared");
9802 g_assert (cfg->gshared && cmethod);
9806 * We are compiling a call to a
9807 * generic method from shared code,
9808 * which means that we have to look up
9809 * the method in the rgctx and do an
9813 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9815 if (cfg->llvm_only) {
9816 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9817 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9819 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9820 // FIXME: Avoid initializing imt_arg/vtable_arg
9821 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9823 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9824 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9829 /* Direct calls to icalls */
9831 MonoMethod *wrapper;
9834 /* Inline the wrapper */
9835 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9837 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9838 g_assert (costs > 0);
9839 cfg->real_offset += 5;
9841 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9842 /* *sp is already set by inline_method */
9847 inline_costs += costs;
9856 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9857 MonoInst *val = sp [fsig->param_count];
9859 if (val->type == STACK_OBJ) {
9860 MonoInst *iargs [2];
9865 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9868 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9869 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9870 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9871 emit_write_barrier (cfg, addr, val);
9872 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9873 GSHAREDVT_FAILURE (*ip);
9874 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9875 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9877 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9878 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9879 if (!cmethod->klass->element_class->valuetype && !readonly)
9880 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9881 CHECK_TYPELOAD (cmethod->klass);
9884 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9887 g_assert_not_reached ();
9894 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9898 /* Tail prefix / tail call optimization */
9900 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9901 /* FIXME: runtime generic context pointer for jumps? */
9902 /* FIXME: handle this for generic sharing eventually */
9903 if ((ins_flag & MONO_INST_TAILCALL) &&
9904 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9905 supported_tail_call = TRUE;
9907 if (supported_tail_call) {
9910 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9911 INLINE_FAILURE ("tail call");
9913 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9915 if (cfg->backend->have_op_tail_call) {
9916 /* Handle tail calls similarly to normal calls */
9919 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9921 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9922 call->tail_call = TRUE;
9923 call->method = cmethod;
9924 call->signature = mono_method_signature (cmethod);
9927 * We implement tail calls by storing the actual arguments into the
9928 * argument variables, then emitting a CEE_JMP.
9930 for (i = 0; i < n; ++i) {
9931 /* Prevent argument from being register allocated */
9932 arg_array [i]->flags |= MONO_INST_VOLATILE;
9933 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9935 ins = (MonoInst*)call;
9936 ins->inst_p0 = cmethod;
9937 ins->inst_p1 = arg_array [0];
9938 MONO_ADD_INS (cfg->cbb, ins);
9939 link_bblock (cfg, cfg->cbb, end_bblock);
9940 start_new_bblock = 1;
9942 // FIXME: Eliminate unreachable epilogs
9945 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9946 * only reachable from this call.
9948 GET_BBLOCK (cfg, tblock, ip + 5);
9949 if (tblock == cfg->cbb || tblock->in_count == 0)
9958 * Synchronized wrappers.
9959 * It's hard to determine where to replace a method with its synchronized
9960 * wrapper without causing an infinite recursion. The current solution is
9961 * to add the synchronized wrapper in the trampolines, and to
9962 * change the called method to a dummy wrapper, and resolve that wrapper
9963 * to the real method in mono_jit_compile_method ().
9965 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9966 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9967 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9968 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9972 * Virtual calls in llvm-only mode.
9974 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9975 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9980 INLINE_FAILURE ("call");
9981 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9982 imt_arg, vtable_arg);
9984 if (tail_call && !cfg->llvm_only) {
9985 link_bblock (cfg, cfg->cbb, end_bblock);
9986 start_new_bblock = 1;
9988 // FIXME: Eliminate unreachable epilogs
9991 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9992 * only reachable from this call.
9994 GET_BBLOCK (cfg, tblock, ip + 5);
9995 if (tblock == cfg->cbb || tblock->in_count == 0)
10002 /* End of call, INS should contain the result of the call, if any */
10004 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10007 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10012 if (keep_this_alive) {
10013 MonoInst *dummy_use;
10015 /* See mono_emit_method_call_full () */
10016 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10019 CHECK_CFG_EXCEPTION;
10023 g_assert (*ip == CEE_RET);
10027 constrained_class = NULL;
10028 if (need_seq_point)
10029 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10033 if (cfg->method != method) {
10034 /* return from inlined method */
10036 * If in_count == 0, that means the ret is unreachable due to
10037 * being preceded by a throw. In that case, inline_method () will
10038 * handle setting the return value
10039 * (test case: test_0_inline_throw ()).
10041 if (return_var && cfg->cbb->in_count) {
10042 MonoType *ret_type = mono_method_signature (method)->ret;
10048 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10051 //g_assert (returnvar != -1);
10052 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10053 cfg->ret_var_set = TRUE;
10056 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10058 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10059 emit_pop_lmf (cfg);
10062 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10064 if (seq_points && !sym_seq_points) {
10066 * Place a seq point here too even though the IL stack is not
10067 * empty, so a step over on
10070 * will work correctly.
10072 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10073 MONO_ADD_INS (cfg->cbb, ins);
10076 g_assert (!return_var);
10080 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10083 emit_setret (cfg, *sp);
10086 if (sp != stack_start)
10088 MONO_INST_NEW (cfg, ins, OP_BR);
10090 ins->inst_target_bb = end_bblock;
10091 MONO_ADD_INS (cfg->cbb, ins);
10092 link_bblock (cfg, cfg->cbb, end_bblock);
10093 start_new_bblock = 1;
10097 MONO_INST_NEW (cfg, ins, OP_BR);
10099 target = ip + 1 + (signed char)(*ip);
10101 GET_BBLOCK (cfg, tblock, target);
10102 link_bblock (cfg, cfg->cbb, tblock);
10103 ins->inst_target_bb = tblock;
10104 if (sp != stack_start) {
10105 handle_stack_args (cfg, stack_start, sp - stack_start);
10107 CHECK_UNVERIFIABLE (cfg);
10109 MONO_ADD_INS (cfg->cbb, ins);
10110 start_new_bblock = 1;
10111 inline_costs += BRANCH_COST;
10125 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10127 target = ip + 1 + *(signed char*)ip;
10130 ADD_BINCOND (NULL);
10133 inline_costs += BRANCH_COST;
10137 MONO_INST_NEW (cfg, ins, OP_BR);
10140 target = ip + 4 + (gint32)read32(ip);
10142 GET_BBLOCK (cfg, tblock, target);
10143 link_bblock (cfg, cfg->cbb, tblock);
10144 ins->inst_target_bb = tblock;
10145 if (sp != stack_start) {
10146 handle_stack_args (cfg, stack_start, sp - stack_start);
10148 CHECK_UNVERIFIABLE (cfg);
10151 MONO_ADD_INS (cfg->cbb, ins);
10153 start_new_bblock = 1;
10154 inline_costs += BRANCH_COST;
10156 case CEE_BRFALSE_S:
10161 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10162 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10163 guint32 opsize = is_short ? 1 : 4;
10165 CHECK_OPSIZE (opsize);
10167 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10170 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10175 GET_BBLOCK (cfg, tblock, target);
10176 link_bblock (cfg, cfg->cbb, tblock);
10177 GET_BBLOCK (cfg, tblock, ip);
10178 link_bblock (cfg, cfg->cbb, tblock);
10180 if (sp != stack_start) {
10181 handle_stack_args (cfg, stack_start, sp - stack_start);
10182 CHECK_UNVERIFIABLE (cfg);
10185 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10186 cmp->sreg1 = sp [0]->dreg;
10187 type_from_op (cfg, cmp, sp [0], NULL);
10190 #if SIZEOF_REGISTER == 4
10191 if (cmp->opcode == OP_LCOMPARE_IMM) {
10192 /* Convert it to OP_LCOMPARE */
10193 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10194 ins->type = STACK_I8;
10195 ins->dreg = alloc_dreg (cfg, STACK_I8);
10197 MONO_ADD_INS (cfg->cbb, ins);
10198 cmp->opcode = OP_LCOMPARE;
10199 cmp->sreg2 = ins->dreg;
10202 MONO_ADD_INS (cfg->cbb, cmp);
10204 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10205 type_from_op (cfg, ins, sp [0], NULL);
10206 MONO_ADD_INS (cfg->cbb, ins);
10207 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10208 GET_BBLOCK (cfg, tblock, target);
10209 ins->inst_true_bb = tblock;
10210 GET_BBLOCK (cfg, tblock, ip);
10211 ins->inst_false_bb = tblock;
10212 start_new_bblock = 2;
10215 inline_costs += BRANCH_COST;
10230 MONO_INST_NEW (cfg, ins, *ip);
10232 target = ip + 4 + (gint32)read32(ip);
10235 ADD_BINCOND (NULL);
10238 inline_costs += BRANCH_COST;
10242 MonoBasicBlock **targets;
10243 MonoBasicBlock *default_bblock;
10244 MonoJumpInfoBBTable *table;
10245 int offset_reg = alloc_preg (cfg);
10246 int target_reg = alloc_preg (cfg);
10247 int table_reg = alloc_preg (cfg);
10248 int sum_reg = alloc_preg (cfg);
10249 gboolean use_op_switch;
10253 n = read32 (ip + 1);
10256 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10260 CHECK_OPSIZE (n * sizeof (guint32));
10261 target = ip + n * sizeof (guint32);
10263 GET_BBLOCK (cfg, default_bblock, target);
10264 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10266 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10267 for (i = 0; i < n; ++i) {
10268 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10269 targets [i] = tblock;
10270 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10274 if (sp != stack_start) {
10276 * Link the current bb with the targets as well, so handle_stack_args
10277 * will set their in_stack correctly.
10279 link_bblock (cfg, cfg->cbb, default_bblock);
10280 for (i = 0; i < n; ++i)
10281 link_bblock (cfg, cfg->cbb, targets [i]);
10283 handle_stack_args (cfg, stack_start, sp - stack_start);
10285 CHECK_UNVERIFIABLE (cfg);
10287 /* Undo the links */
10288 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10289 for (i = 0; i < n; ++i)
10290 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10294 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10296 for (i = 0; i < n; ++i)
10297 link_bblock (cfg, cfg->cbb, targets [i]);
10299 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10300 table->table = targets;
10301 table->table_size = n;
10303 use_op_switch = FALSE;
10305 /* ARM implements SWITCH statements differently */
10306 /* FIXME: Make it use the generic implementation */
10307 if (!cfg->compile_aot)
10308 use_op_switch = TRUE;
10311 if (COMPILE_LLVM (cfg))
10312 use_op_switch = TRUE;
10314 cfg->cbb->has_jump_table = 1;
10316 if (use_op_switch) {
10317 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10318 ins->sreg1 = src1->dreg;
10319 ins->inst_p0 = table;
10320 ins->inst_many_bb = targets;
10321 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10322 MONO_ADD_INS (cfg->cbb, ins);
10324 if (sizeof (gpointer) == 8)
10325 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10329 #if SIZEOF_REGISTER == 8
10330 /* The upper word might not be zero, and we add it to a 64 bit address later */
10331 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10334 if (cfg->compile_aot) {
10335 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10337 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10338 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10339 ins->inst_p0 = table;
10340 ins->dreg = table_reg;
10341 MONO_ADD_INS (cfg->cbb, ins);
10344 /* FIXME: Use load_memindex */
10345 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10347 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10349 start_new_bblock = 1;
10350 inline_costs += (BRANCH_COST * 2);
10363 case CEE_LDIND_REF:
10370 dreg = alloc_freg (cfg);
10373 dreg = alloc_lreg (cfg);
10375 case CEE_LDIND_REF:
10376 dreg = alloc_ireg_ref (cfg);
10379 dreg = alloc_preg (cfg);
10382 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10383 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10384 if (*ip == CEE_LDIND_R4)
10385 ins->type = cfg->r4_stack_type;
10386 ins->flags |= ins_flag;
10387 MONO_ADD_INS (cfg->cbb, ins);
10389 if (ins_flag & MONO_INST_VOLATILE) {
10390 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10391 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10396 case CEE_STIND_REF:
10407 if (ins_flag & MONO_INST_VOLATILE) {
10408 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10409 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10412 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10413 ins->flags |= ins_flag;
10416 MONO_ADD_INS (cfg->cbb, ins);
10418 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10419 emit_write_barrier (cfg, sp [0], sp [1]);
10428 MONO_INST_NEW (cfg, ins, (*ip));
10430 ins->sreg1 = sp [0]->dreg;
10431 ins->sreg2 = sp [1]->dreg;
10432 type_from_op (cfg, ins, sp [0], sp [1]);
10434 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10436 /* Use the immediate opcodes if possible */
10437 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10438 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10439 if (imm_opcode != -1) {
10440 ins->opcode = imm_opcode;
10441 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10444 NULLIFY_INS (sp [1]);
10448 MONO_ADD_INS ((cfg)->cbb, (ins));
10450 *sp++ = mono_decompose_opcode (cfg, ins);
10467 MONO_INST_NEW (cfg, ins, (*ip));
10469 ins->sreg1 = sp [0]->dreg;
10470 ins->sreg2 = sp [1]->dreg;
10471 type_from_op (cfg, ins, sp [0], sp [1]);
10473 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10474 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10476 /* FIXME: Pass opcode to is_inst_imm */
10478 /* Use the immediate opcodes if possible */
10479 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10480 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10481 if (imm_opcode != -1) {
10482 ins->opcode = imm_opcode;
10483 if (sp [1]->opcode == OP_I8CONST) {
10484 #if SIZEOF_REGISTER == 8
10485 ins->inst_imm = sp [1]->inst_l;
10487 ins->inst_ls_word = sp [1]->inst_ls_word;
10488 ins->inst_ms_word = sp [1]->inst_ms_word;
10492 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10495 /* Might be followed by an instruction added by add_widen_op */
10496 if (sp [1]->next == NULL)
10497 NULLIFY_INS (sp [1]);
10500 MONO_ADD_INS ((cfg)->cbb, (ins));
10502 *sp++ = mono_decompose_opcode (cfg, ins);
10515 case CEE_CONV_OVF_I8:
10516 case CEE_CONV_OVF_U8:
10517 case CEE_CONV_R_UN:
10520 /* Special case this earlier so we have long constants in the IR */
10521 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10522 int data = sp [-1]->inst_c0;
10523 sp [-1]->opcode = OP_I8CONST;
10524 sp [-1]->type = STACK_I8;
10525 #if SIZEOF_REGISTER == 8
10526 if ((*ip) == CEE_CONV_U8)
10527 sp [-1]->inst_c0 = (guint32)data;
10529 sp [-1]->inst_c0 = data;
10531 sp [-1]->inst_ls_word = data;
10532 if ((*ip) == CEE_CONV_U8)
10533 sp [-1]->inst_ms_word = 0;
10535 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10537 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10544 case CEE_CONV_OVF_I4:
10545 case CEE_CONV_OVF_I1:
10546 case CEE_CONV_OVF_I2:
10547 case CEE_CONV_OVF_I:
10548 case CEE_CONV_OVF_U:
10551 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10552 ADD_UNOP (CEE_CONV_OVF_I8);
10559 case CEE_CONV_OVF_U1:
10560 case CEE_CONV_OVF_U2:
10561 case CEE_CONV_OVF_U4:
10564 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10565 ADD_UNOP (CEE_CONV_OVF_U8);
10572 case CEE_CONV_OVF_I1_UN:
10573 case CEE_CONV_OVF_I2_UN:
10574 case CEE_CONV_OVF_I4_UN:
10575 case CEE_CONV_OVF_I8_UN:
10576 case CEE_CONV_OVF_U1_UN:
10577 case CEE_CONV_OVF_U2_UN:
10578 case CEE_CONV_OVF_U4_UN:
10579 case CEE_CONV_OVF_U8_UN:
10580 case CEE_CONV_OVF_I_UN:
10581 case CEE_CONV_OVF_U_UN:
10588 CHECK_CFG_EXCEPTION;
10592 case CEE_ADD_OVF_UN:
10594 case CEE_MUL_OVF_UN:
10596 case CEE_SUB_OVF_UN:
10602 GSHAREDVT_FAILURE (*ip);
10605 token = read32 (ip + 1);
10606 klass = mini_get_class (method, token, generic_context);
10607 CHECK_TYPELOAD (klass);
10609 if (generic_class_is_reference_type (cfg, klass)) {
10610 MonoInst *store, *load;
10611 int dreg = alloc_ireg_ref (cfg);
10613 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10614 load->flags |= ins_flag;
10615 MONO_ADD_INS (cfg->cbb, load);
10617 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10618 store->flags |= ins_flag;
10619 MONO_ADD_INS (cfg->cbb, store);
10621 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10622 emit_write_barrier (cfg, sp [0], sp [1]);
10624 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10630 int loc_index = -1;
10636 token = read32 (ip + 1);
10637 klass = mini_get_class (method, token, generic_context);
10638 CHECK_TYPELOAD (klass);
10640 /* Optimize the common ldobj+stloc combination */
10643 loc_index = ip [6];
10650 loc_index = ip [5] - CEE_STLOC_0;
10657 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10658 CHECK_LOCAL (loc_index);
10660 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10661 ins->dreg = cfg->locals [loc_index]->dreg;
10662 ins->flags |= ins_flag;
10665 if (ins_flag & MONO_INST_VOLATILE) {
10666 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10667 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10673 /* Optimize the ldobj+stobj combination */
10674 /* The reference case ends up being a load+store anyway */
10675 /* Skip this if the operation is volatile. */
10676 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10681 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10688 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10689 ins->flags |= ins_flag;
10692 if (ins_flag & MONO_INST_VOLATILE) {
10693 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10694 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10703 CHECK_STACK_OVF (1);
10705 n = read32 (ip + 1);
10707 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10708 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10709 ins->type = STACK_OBJ;
10712 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10713 MonoInst *iargs [1];
10714 char *str = (char *)mono_method_get_wrapper_data (method, n);
10716 if (cfg->compile_aot)
10717 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10719 EMIT_NEW_PCONST (cfg, iargs [0], str);
10720 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10722 if (cfg->opt & MONO_OPT_SHARED) {
10723 MonoInst *iargs [3];
10725 if (cfg->compile_aot) {
10726 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10728 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10729 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10730 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10731 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10732 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10734 if (cfg->cbb->out_of_line) {
10735 MonoInst *iargs [2];
10737 if (image == mono_defaults.corlib) {
10739 * Avoid relocations in AOT and save some space by using a
10740 * version of helper_ldstr specialized to mscorlib.
10742 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10743 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10745 /* Avoid creating the string object */
10746 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10747 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10748 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10752 if (cfg->compile_aot) {
10753 NEW_LDSTRCONST (cfg, ins, image, n);
10755 MONO_ADD_INS (cfg->cbb, ins);
10758 NEW_PCONST (cfg, ins, NULL);
10759 ins->type = STACK_OBJ;
10760 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10762 OUT_OF_MEMORY_FAILURE;
10765 MONO_ADD_INS (cfg->cbb, ins);
10774 MonoInst *iargs [2];
10775 MonoMethodSignature *fsig;
10778 MonoInst *vtable_arg = NULL;
10781 token = read32 (ip + 1);
10782 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10785 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10788 mono_save_token_info (cfg, image, token, cmethod);
10790 if (!mono_class_init (cmethod->klass))
10791 TYPE_LOAD_ERROR (cmethod->klass);
10793 context_used = mini_method_check_context_used (cfg, cmethod);
10795 if (mono_security_core_clr_enabled ())
10796 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10798 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10799 emit_class_init (cfg, cmethod->klass);
10800 CHECK_TYPELOAD (cmethod->klass);
10804 if (cfg->gsharedvt) {
10805 if (mini_is_gsharedvt_variable_signature (sig))
10806 GSHAREDVT_FAILURE (*ip);
10810 n = fsig->param_count;
10814 * Generate smaller code for the common newobj <exception> instruction in
10815 * argument checking code.
10817 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10818 is_exception_class (cmethod->klass) && n <= 2 &&
10819 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10820 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10821 MonoInst *iargs [3];
10825 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10828 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10831 iargs [1] = sp [0];
10832 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10835 iargs [1] = sp [0];
10836 iargs [2] = sp [1];
10837 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10840 g_assert_not_reached ();
10848 /* move the args to allow room for 'this' in the first position */
10854 /* check_call_signature () requires sp[0] to be set */
10855 this_ins.type = STACK_OBJ;
10856 sp [0] = &this_ins;
10857 if (check_call_signature (cfg, fsig, sp))
10862 if (mini_class_is_system_array (cmethod->klass)) {
10863 *sp = emit_get_rgctx_method (cfg, context_used,
10864 cmethod, MONO_RGCTX_INFO_METHOD);
10866 /* Avoid varargs in the common case */
10867 if (fsig->param_count == 1)
10868 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10869 else if (fsig->param_count == 2)
10870 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10871 else if (fsig->param_count == 3)
10872 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10873 else if (fsig->param_count == 4)
10874 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10876 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10877 } else if (cmethod->string_ctor) {
10878 g_assert (!context_used);
10879 g_assert (!vtable_arg);
10880 /* we simply pass a null pointer */
10881 EMIT_NEW_PCONST (cfg, *sp, NULL);
10882 /* now call the string ctor */
10883 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10885 if (cmethod->klass->valuetype) {
10886 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10887 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10888 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10893 * The code generated by mini_emit_virtual_call () expects
10894 * iargs [0] to be a boxed instance, but luckily the vcall
10895 * will be transformed into a normal call there.
10897 } else if (context_used) {
10898 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10901 MonoVTable *vtable = NULL;
10903 if (!cfg->compile_aot)
10904 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10905 CHECK_TYPELOAD (cmethod->klass);
10908 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10909 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10910 * As a workaround, we call class cctors before allocating objects.
10912 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10913 emit_class_init (cfg, cmethod->klass);
10914 if (cfg->verbose_level > 2)
10915 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10916 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10919 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10922 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10925 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10927 /* Now call the actual ctor */
10928 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10929 CHECK_CFG_EXCEPTION;
10932 if (alloc == NULL) {
10934 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10935 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10943 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10944 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10947 case CEE_CASTCLASS:
10951 token = read32 (ip + 1);
10952 klass = mini_get_class (method, token, generic_context);
10953 CHECK_TYPELOAD (klass);
10954 if (sp [0]->type != STACK_OBJ)
10957 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10958 CHECK_CFG_EXCEPTION;
10967 token = read32 (ip + 1);
10968 klass = mini_get_class (method, token, generic_context);
10969 CHECK_TYPELOAD (klass);
10970 if (sp [0]->type != STACK_OBJ)
10973 context_used = mini_class_check_context_used (cfg, klass);
10975 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10976 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10977 MonoInst *args [3];
10984 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10987 idx = get_castclass_cache_idx (cfg);
10988 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10990 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10993 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10994 MonoMethod *mono_isinst;
10995 MonoInst *iargs [1];
10998 mono_isinst = mono_marshal_get_isinst (klass);
10999 iargs [0] = sp [0];
11001 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
11002 iargs, ip, cfg->real_offset, TRUE);
11003 CHECK_CFG_EXCEPTION;
11004 g_assert (costs > 0);
11007 cfg->real_offset += 5;
11011 inline_costs += costs;
11014 ins = handle_isinst (cfg, klass, *sp, context_used);
11015 CHECK_CFG_EXCEPTION;
11021 case CEE_UNBOX_ANY: {
11022 MonoInst *res, *addr;
11027 token = read32 (ip + 1);
11028 klass = mini_get_class (method, token, generic_context);
11029 CHECK_TYPELOAD (klass);
11031 mono_save_token_info (cfg, image, token, klass);
11033 context_used = mini_class_check_context_used (cfg, klass);
11035 if (mini_is_gsharedvt_klass (klass)) {
11036 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11038 } else if (generic_class_is_reference_type (cfg, klass)) {
11039 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11040 CHECK_CFG_EXCEPTION;
11041 } else if (mono_class_is_nullable (klass)) {
11042 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11044 addr = handle_unbox (cfg, klass, sp, context_used);
11046 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11057 MonoClass *enum_class;
11058 MonoMethod *has_flag;
11064 token = read32 (ip + 1);
11065 klass = mini_get_class (method, token, generic_context);
11066 CHECK_TYPELOAD (klass);
11068 mono_save_token_info (cfg, image, token, klass);
11070 context_used = mini_class_check_context_used (cfg, klass);
11072 if (generic_class_is_reference_type (cfg, klass)) {
11078 if (klass == mono_defaults.void_class)
11080 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11082 /* frequent check in generic code: box (struct), brtrue */
11087 * <push int/long ptr>
11090 * constrained. MyFlags
11091 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11093 * If we find this sequence and the operand types on box and constrained
11094 * are equal, we can emit a specialized instruction sequence instead of
11095 * the very slow HasFlag () call.
11097 if ((cfg->opt & MONO_OPT_INTRINS) &&
11098 /* Cheap checks first. */
11099 ip + 5 + 6 + 5 < end &&
11100 ip [5] == CEE_PREFIX1 &&
11101 ip [6] == CEE_CONSTRAINED_ &&
11102 ip [11] == CEE_CALLVIRT &&
11103 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11104 mono_class_is_enum (klass) &&
11105 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11106 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11107 has_flag->klass == mono_defaults.enum_class &&
11108 !strcmp (has_flag->name, "HasFlag") &&
11109 has_flag->signature->hasthis &&
11110 has_flag->signature->param_count == 1) {
11111 CHECK_TYPELOAD (enum_class);
11113 if (enum_class == klass) {
11114 MonoInst *enum_this, *enum_flag;
11119 enum_this = sp [0];
11120 enum_flag = sp [1];
11122 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11127 // FIXME: LLVM can't handle the inconsistent bb linking
11128 if (!mono_class_is_nullable (klass) &&
11129 !mini_is_gsharedvt_klass (klass) &&
11130 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11131 (ip [5] == CEE_BRTRUE ||
11132 ip [5] == CEE_BRTRUE_S ||
11133 ip [5] == CEE_BRFALSE ||
11134 ip [5] == CEE_BRFALSE_S)) {
11135 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11137 MonoBasicBlock *true_bb, *false_bb;
11141 if (cfg->verbose_level > 3) {
11142 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11143 printf ("<box+brtrue opt>\n");
11148 case CEE_BRFALSE_S:
11151 target = ip + 1 + (signed char)(*ip);
11158 target = ip + 4 + (gint)(read32 (ip));
11162 g_assert_not_reached ();
11166 * We need to link both bblocks, since it is needed for handling stack
11167 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11168 * Branching to only one of them would lead to inconsistencies, so
11169 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11171 GET_BBLOCK (cfg, true_bb, target);
11172 GET_BBLOCK (cfg, false_bb, ip);
11174 mono_link_bblock (cfg, cfg->cbb, true_bb);
11175 mono_link_bblock (cfg, cfg->cbb, false_bb);
11177 if (sp != stack_start) {
11178 handle_stack_args (cfg, stack_start, sp - stack_start);
11180 CHECK_UNVERIFIABLE (cfg);
11183 if (COMPILE_LLVM (cfg)) {
11184 dreg = alloc_ireg (cfg);
11185 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11186 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11188 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11190 /* The JIT can't eliminate the iconst+compare */
11191 MONO_INST_NEW (cfg, ins, OP_BR);
11192 ins->inst_target_bb = is_true ? true_bb : false_bb;
11193 MONO_ADD_INS (cfg->cbb, ins);
11196 start_new_bblock = 1;
11200 *sp++ = handle_box (cfg, val, klass, context_used);
11202 CHECK_CFG_EXCEPTION;
11211 token = read32 (ip + 1);
11212 klass = mini_get_class (method, token, generic_context);
11213 CHECK_TYPELOAD (klass);
11215 mono_save_token_info (cfg, image, token, klass);
11217 context_used = mini_class_check_context_used (cfg, klass);
11219 if (mono_class_is_nullable (klass)) {
11222 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11223 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11227 ins = handle_unbox (cfg, klass, sp, context_used);
11240 MonoClassField *field;
11241 #ifndef DISABLE_REMOTING
11245 gboolean is_instance;
11247 gpointer addr = NULL;
11248 gboolean is_special_static;
11250 MonoInst *store_val = NULL;
11251 MonoInst *thread_ins;
11254 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11256 if (op == CEE_STFLD) {
11259 store_val = sp [1];
11264 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11266 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11269 if (op == CEE_STSFLD) {
11272 store_val = sp [0];
11277 token = read32 (ip + 1);
11278 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11279 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11280 klass = field->parent;
11283 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11286 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11287 FIELD_ACCESS_FAILURE (method, field);
11288 mono_class_init (klass);
11290 /* if the class is Critical then transparent code cannot access its fields */
11291 if (!is_instance && mono_security_core_clr_enabled ())
11292 ensure_method_is_allowed_to_access_field (cfg, method, field);
11294 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11295 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11296 if (mono_security_core_clr_enabled ())
11297 ensure_method_is_allowed_to_access_field (cfg, method, field);
11300 ftype = mono_field_get_type (field);
11303 * LDFLD etc. is usable on static fields as well, so convert those cases to
11306 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11318 g_assert_not_reached ();
11320 is_instance = FALSE;
11323 context_used = mini_class_check_context_used (cfg, klass);
11325 /* INSTANCE CASE */
11327 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11328 if (op == CEE_STFLD) {
11329 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11331 #ifndef DISABLE_REMOTING
11332 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11333 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11334 MonoInst *iargs [5];
11336 GSHAREDVT_FAILURE (op);
11338 iargs [0] = sp [0];
11339 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11340 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11341 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11343 iargs [4] = sp [1];
11345 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11346 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11347 iargs, ip, cfg->real_offset, TRUE);
11348 CHECK_CFG_EXCEPTION;
11349 g_assert (costs > 0);
11351 cfg->real_offset += 5;
11353 inline_costs += costs;
11355 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11362 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11364 if (mini_is_gsharedvt_klass (klass)) {
11365 MonoInst *offset_ins;
11367 context_used = mini_class_check_context_used (cfg, klass);
11369 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11370 /* The value is offset by 1 */
11371 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11372 dreg = alloc_ireg_mp (cfg);
11373 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11374 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11375 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11377 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11379 if (sp [0]->opcode != OP_LDADDR)
11380 store->flags |= MONO_INST_FAULT;
11382 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11383 /* insert call to write barrier */
11387 dreg = alloc_ireg_mp (cfg);
11388 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11389 emit_write_barrier (cfg, ptr, sp [1]);
11392 store->flags |= ins_flag;
11399 #ifndef DISABLE_REMOTING
11400 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11401 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11402 MonoInst *iargs [4];
11404 GSHAREDVT_FAILURE (op);
11406 iargs [0] = sp [0];
11407 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11408 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11409 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11410 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11411 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11412 iargs, ip, cfg->real_offset, TRUE);
11413 CHECK_CFG_EXCEPTION;
11414 g_assert (costs > 0);
11416 cfg->real_offset += 5;
11420 inline_costs += costs;
11422 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11428 if (sp [0]->type == STACK_VTYPE) {
11431 /* Have to compute the address of the variable */
11433 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11435 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11437 g_assert (var->klass == klass);
11439 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11443 if (op == CEE_LDFLDA) {
11444 if (sp [0]->type == STACK_OBJ) {
11445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11446 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11449 dreg = alloc_ireg_mp (cfg);
11451 if (mini_is_gsharedvt_klass (klass)) {
11452 MonoInst *offset_ins;
11454 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11455 /* The value is offset by 1 */
11456 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11457 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11459 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11461 ins->klass = mono_class_from_mono_type (field->type);
11462 ins->type = STACK_MP;
11467 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11469 if (mini_is_gsharedvt_klass (klass)) {
11470 MonoInst *offset_ins;
11472 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11473 /* The value is offset by 1 */
11474 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11475 dreg = alloc_ireg_mp (cfg);
11476 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11479 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11481 load->flags |= ins_flag;
11482 if (sp [0]->opcode != OP_LDADDR)
11483 load->flags |= MONO_INST_FAULT;
11495 context_used = mini_class_check_context_used (cfg, klass);
11497 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11500 /* The special_static_fields field is initialized in mono_class_vtable, so
11501 * mono_class_vtable needs to be called here.
11503 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11504 mono_class_vtable (cfg->domain, klass);
11505 CHECK_TYPELOAD (klass);
11507 mono_domain_lock (cfg->domain);
11508 if (cfg->domain->special_static_fields)
11509 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11510 mono_domain_unlock (cfg->domain);
11512 is_special_static = mono_class_field_is_special_static (field);
11514 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11515 thread_ins = mono_get_thread_intrinsic (cfg);
11519 /* Generate IR to compute the field address */
11520 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11522 * Fast access to TLS data
11523 * Inline version of get_thread_static_data () in
11527 int idx, static_data_reg, array_reg, dreg;
11529 GSHAREDVT_FAILURE (op);
11531 MONO_ADD_INS (cfg->cbb, thread_ins);
11532 static_data_reg = alloc_ireg (cfg);
11533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11535 if (cfg->compile_aot) {
11536 int offset_reg, offset2_reg, idx_reg;
11538 /* For TLS variables, this will return the TLS offset */
11539 EMIT_NEW_SFLDACONST (cfg, ins, field);
11540 offset_reg = ins->dreg;
11541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11542 idx_reg = alloc_ireg (cfg);
11543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11545 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11546 array_reg = alloc_ireg (cfg);
11547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11548 offset2_reg = alloc_ireg (cfg);
11549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11551 dreg = alloc_ireg (cfg);
11552 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11554 offset = (gsize)addr & 0x7fffffff;
11555 idx = offset & 0x3f;
11557 array_reg = alloc_ireg (cfg);
11558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11559 dreg = alloc_ireg (cfg);
11560 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11562 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11563 (cfg->compile_aot && is_special_static) ||
11564 (context_used && is_special_static)) {
11565 MonoInst *iargs [2];
11567 g_assert (field->parent);
11568 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11569 if (context_used) {
11570 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11571 field, MONO_RGCTX_INFO_CLASS_FIELD);
11573 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11575 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11576 } else if (context_used) {
11577 MonoInst *static_data;
11580 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11581 method->klass->name_space, method->klass->name, method->name,
11582 depth, field->offset);
11585 if (mono_class_needs_cctor_run (klass, method))
11586 emit_class_init (cfg, klass);
11589 * The pointer we're computing here is
11591 * super_info.static_data + field->offset
11593 static_data = emit_get_rgctx_klass (cfg, context_used,
11594 klass, MONO_RGCTX_INFO_STATIC_DATA);
11596 if (mini_is_gsharedvt_klass (klass)) {
11597 MonoInst *offset_ins;
11599 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11600 /* The value is offset by 1 */
11601 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11602 dreg = alloc_ireg_mp (cfg);
11603 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11604 } else if (field->offset == 0) {
11607 int addr_reg = mono_alloc_preg (cfg);
11608 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11610 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11611 MonoInst *iargs [2];
11613 g_assert (field->parent);
11614 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11615 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11616 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11618 MonoVTable *vtable = NULL;
11620 if (!cfg->compile_aot)
11621 vtable = mono_class_vtable (cfg->domain, klass);
11622 CHECK_TYPELOAD (klass);
11625 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11626 if (!(g_slist_find (class_inits, klass))) {
11627 emit_class_init (cfg, klass);
11628 if (cfg->verbose_level > 2)
11629 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11630 class_inits = g_slist_prepend (class_inits, klass);
11633 if (cfg->run_cctors) {
11634 /* This makes it so that inlining cannot trigger */
11635 /* .cctors: too many apps depend on them */
11636 /* running with a specific order... */
11638 if (! vtable->initialized)
11639 INLINE_FAILURE ("class init");
11640 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11641 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11642 g_assert_not_reached ();
11643 goto exception_exit;
11647 if (cfg->compile_aot)
11648 EMIT_NEW_SFLDACONST (cfg, ins, field);
11651 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11653 EMIT_NEW_PCONST (cfg, ins, addr);
11656 MonoInst *iargs [1];
11657 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11658 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11662 /* Generate IR to do the actual load/store operation */
11664 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11665 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11666 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11669 if (op == CEE_LDSFLDA) {
11670 ins->klass = mono_class_from_mono_type (ftype);
11671 ins->type = STACK_PTR;
11673 } else if (op == CEE_STSFLD) {
11676 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11677 store->flags |= ins_flag;
11679 gboolean is_const = FALSE;
11680 MonoVTable *vtable = NULL;
11681 gpointer addr = NULL;
11683 if (!context_used) {
11684 vtable = mono_class_vtable (cfg->domain, klass);
11685 CHECK_TYPELOAD (klass);
11687 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11688 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11689 int ro_type = ftype->type;
11691 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11692 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11693 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11696 GSHAREDVT_FAILURE (op);
11698 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11701 case MONO_TYPE_BOOLEAN:
11703 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11707 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11710 case MONO_TYPE_CHAR:
11712 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11716 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11721 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11725 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11730 case MONO_TYPE_PTR:
11731 case MONO_TYPE_FNPTR:
11732 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11733 type_to_eval_stack_type ((cfg), field->type, *sp);
11736 case MONO_TYPE_STRING:
11737 case MONO_TYPE_OBJECT:
11738 case MONO_TYPE_CLASS:
11739 case MONO_TYPE_SZARRAY:
11740 case MONO_TYPE_ARRAY:
11741 if (!mono_gc_is_moving ()) {
11742 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11743 type_to_eval_stack_type ((cfg), field->type, *sp);
11751 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11756 case MONO_TYPE_VALUETYPE:
11766 CHECK_STACK_OVF (1);
11768 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11769 load->flags |= ins_flag;
11775 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11776 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11777 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11788 token = read32 (ip + 1);
11789 klass = mini_get_class (method, token, generic_context);
11790 CHECK_TYPELOAD (klass);
11791 if (ins_flag & MONO_INST_VOLATILE) {
11792 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11795 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11796 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11797 ins->flags |= ins_flag;
11798 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11799 generic_class_is_reference_type (cfg, klass)) {
11800 /* insert call to write barrier */
11801 emit_write_barrier (cfg, sp [0], sp [1]);
11813 const char *data_ptr;
11815 guint32 field_token;
11821 token = read32 (ip + 1);
11823 klass = mini_get_class (method, token, generic_context);
11824 CHECK_TYPELOAD (klass);
11826 context_used = mini_class_check_context_used (cfg, klass);
11828 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11829 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11830 ins->sreg1 = sp [0]->dreg;
11831 ins->type = STACK_I4;
11832 ins->dreg = alloc_ireg (cfg);
11833 MONO_ADD_INS (cfg->cbb, ins);
11834 *sp = mono_decompose_opcode (cfg, ins);
11837 if (context_used) {
11838 MonoInst *args [3];
11839 MonoClass *array_class = mono_array_class_get (klass, 1);
11840 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11842 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11845 args [0] = emit_get_rgctx_klass (cfg, context_used,
11846 array_class, MONO_RGCTX_INFO_VTABLE);
11851 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11853 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11855 if (cfg->opt & MONO_OPT_SHARED) {
11856 /* Decompose now to avoid problems with references to the domainvar */
11857 MonoInst *iargs [3];
11859 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11860 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11861 iargs [2] = sp [0];
11863 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11865 /* Decompose later since it is needed by abcrem */
11866 MonoClass *array_type = mono_array_class_get (klass, 1);
11867 mono_class_vtable (cfg->domain, array_type);
11868 CHECK_TYPELOAD (array_type);
11870 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11871 ins->dreg = alloc_ireg_ref (cfg);
11872 ins->sreg1 = sp [0]->dreg;
11873 ins->inst_newa_class = klass;
11874 ins->type = STACK_OBJ;
11875 ins->klass = array_type;
11876 MONO_ADD_INS (cfg->cbb, ins);
11877 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11878 cfg->cbb->has_array_access = TRUE;
11880 /* Needed so mono_emit_load_get_addr () gets called */
11881 mono_get_got_var (cfg);
11891 * we inline/optimize the initialization sequence if possible.
11892 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11893 * for small sizes open code the memcpy
11894 * ensure the rva field is big enough
11896 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11897 MonoMethod *memcpy_method = get_memcpy_method ();
11898 MonoInst *iargs [3];
11899 int add_reg = alloc_ireg_mp (cfg);
11901 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11902 if (cfg->compile_aot) {
11903 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11905 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11907 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11908 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11917 if (sp [0]->type != STACK_OBJ)
11920 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11921 ins->dreg = alloc_preg (cfg);
11922 ins->sreg1 = sp [0]->dreg;
11923 ins->type = STACK_I4;
11924 /* This flag will be inherited by the decomposition */
11925 ins->flags |= MONO_INST_FAULT;
11926 MONO_ADD_INS (cfg->cbb, ins);
11927 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11928 cfg->cbb->has_array_access = TRUE;
11936 if (sp [0]->type != STACK_OBJ)
11939 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11941 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11942 CHECK_TYPELOAD (klass);
11943 /* we need to make sure that this array is exactly the type it needs
11944 * to be for correctness. the wrappers are lax with their usage
11945 * so we need to ignore them here
11947 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11948 MonoClass *array_class = mono_array_class_get (klass, 1);
11949 mini_emit_check_array_type (cfg, sp [0], array_class);
11950 CHECK_TYPELOAD (array_class);
11954 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11959 case CEE_LDELEM_I1:
11960 case CEE_LDELEM_U1:
11961 case CEE_LDELEM_I2:
11962 case CEE_LDELEM_U2:
11963 case CEE_LDELEM_I4:
11964 case CEE_LDELEM_U4:
11965 case CEE_LDELEM_I8:
11967 case CEE_LDELEM_R4:
11968 case CEE_LDELEM_R8:
11969 case CEE_LDELEM_REF: {
11975 if (*ip == CEE_LDELEM) {
11977 token = read32 (ip + 1);
11978 klass = mini_get_class (method, token, generic_context);
11979 CHECK_TYPELOAD (klass);
11980 mono_class_init (klass);
11983 klass = array_access_to_klass (*ip);
11985 if (sp [0]->type != STACK_OBJ)
11988 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11990 if (mini_is_gsharedvt_variable_klass (klass)) {
11991 // FIXME-VT: OP_ICONST optimization
11992 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11993 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11994 ins->opcode = OP_LOADV_MEMBASE;
11995 } else if (sp [1]->opcode == OP_ICONST) {
11996 int array_reg = sp [0]->dreg;
11997 int index_reg = sp [1]->dreg;
11998 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
12000 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12001 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12003 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12004 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12006 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12007 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12010 if (*ip == CEE_LDELEM)
12017 case CEE_STELEM_I1:
12018 case CEE_STELEM_I2:
12019 case CEE_STELEM_I4:
12020 case CEE_STELEM_I8:
12021 case CEE_STELEM_R4:
12022 case CEE_STELEM_R8:
12023 case CEE_STELEM_REF:
12028 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12030 if (*ip == CEE_STELEM) {
12032 token = read32 (ip + 1);
12033 klass = mini_get_class (method, token, generic_context);
12034 CHECK_TYPELOAD (klass);
12035 mono_class_init (klass);
12038 klass = array_access_to_klass (*ip);
12040 if (sp [0]->type != STACK_OBJ)
12043 emit_array_store (cfg, klass, sp, TRUE);
12045 if (*ip == CEE_STELEM)
12052 case CEE_CKFINITE: {
12056 if (cfg->llvm_only) {
12057 MonoInst *iargs [1];
12059 iargs [0] = sp [0];
12060 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12062 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12063 ins->sreg1 = sp [0]->dreg;
12064 ins->dreg = alloc_freg (cfg);
12065 ins->type = STACK_R8;
12066 MONO_ADD_INS (cfg->cbb, ins);
12068 *sp++ = mono_decompose_opcode (cfg, ins);
12074 case CEE_REFANYVAL: {
12075 MonoInst *src_var, *src;
12077 int klass_reg = alloc_preg (cfg);
12078 int dreg = alloc_preg (cfg);
12080 GSHAREDVT_FAILURE (*ip);
12083 MONO_INST_NEW (cfg, ins, *ip);
12086 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12087 CHECK_TYPELOAD (klass);
12089 context_used = mini_class_check_context_used (cfg, klass);
12092 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12094 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12095 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12098 if (context_used) {
12099 MonoInst *klass_ins;
12101 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12102 klass, MONO_RGCTX_INFO_KLASS);
12105 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12106 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12108 mini_emit_class_check (cfg, klass_reg, klass);
12110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12111 ins->type = STACK_MP;
12112 ins->klass = klass;
12117 case CEE_MKREFANY: {
12118 MonoInst *loc, *addr;
12120 GSHAREDVT_FAILURE (*ip);
12123 MONO_INST_NEW (cfg, ins, *ip);
12126 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12127 CHECK_TYPELOAD (klass);
12129 context_used = mini_class_check_context_used (cfg, klass);
12131 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12132 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12134 if (context_used) {
12135 MonoInst *const_ins;
12136 int type_reg = alloc_preg (cfg);
12138 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12141 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12142 } else if (cfg->compile_aot) {
12143 int const_reg = alloc_preg (cfg);
12144 int type_reg = alloc_preg (cfg);
12146 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12149 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12151 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12152 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12154 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12156 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12157 ins->type = STACK_VTYPE;
12158 ins->klass = mono_defaults.typed_reference_class;
12163 case CEE_LDTOKEN: {
12165 MonoClass *handle_class;
12167 CHECK_STACK_OVF (1);
12170 n = read32 (ip + 1);
12172 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12173 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12174 handle = mono_method_get_wrapper_data (method, n);
12175 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12176 if (handle_class == mono_defaults.typehandle_class)
12177 handle = &((MonoClass*)handle)->byval_arg;
12180 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12185 mono_class_init (handle_class);
12186 if (cfg->gshared) {
12187 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12188 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12189 /* This case handles ldtoken
12190 of an open type, like for
12193 } else if (handle_class == mono_defaults.typehandle_class) {
12194 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12195 } else if (handle_class == mono_defaults.fieldhandle_class)
12196 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12197 else if (handle_class == mono_defaults.methodhandle_class)
12198 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12200 g_assert_not_reached ();
12203 if ((cfg->opt & MONO_OPT_SHARED) &&
12204 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12205 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12206 MonoInst *addr, *vtvar, *iargs [3];
12207 int method_context_used;
12209 method_context_used = mini_method_check_context_used (cfg, method);
12211 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12213 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12214 EMIT_NEW_ICONST (cfg, iargs [1], n);
12215 if (method_context_used) {
12216 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12217 method, MONO_RGCTX_INFO_METHOD);
12218 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12220 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12221 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12223 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12225 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12227 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12229 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12230 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12231 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12232 (cmethod->klass == mono_defaults.systemtype_class) &&
12233 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12234 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12236 mono_class_init (tclass);
12237 if (context_used) {
12238 ins = emit_get_rgctx_klass (cfg, context_used,
12239 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12240 } else if (cfg->compile_aot) {
12241 if (method->wrapper_type) {
12242 mono_error_init (&error); //got to do it since there are multiple conditionals below
12243 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12244 /* Special case for static synchronized wrappers */
12245 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12247 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12248 /* FIXME: n is not a normal token */
12250 EMIT_NEW_PCONST (cfg, ins, NULL);
12253 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12256 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12258 EMIT_NEW_PCONST (cfg, ins, rt);
12260 ins->type = STACK_OBJ;
12261 ins->klass = cmethod->klass;
12264 MonoInst *addr, *vtvar;
12266 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12268 if (context_used) {
12269 if (handle_class == mono_defaults.typehandle_class) {
12270 ins = emit_get_rgctx_klass (cfg, context_used,
12271 mono_class_from_mono_type ((MonoType *)handle),
12272 MONO_RGCTX_INFO_TYPE);
12273 } else if (handle_class == mono_defaults.methodhandle_class) {
12274 ins = emit_get_rgctx_method (cfg, context_used,
12275 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12276 } else if (handle_class == mono_defaults.fieldhandle_class) {
12277 ins = emit_get_rgctx_field (cfg, context_used,
12278 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12280 g_assert_not_reached ();
12282 } else if (cfg->compile_aot) {
12283 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12285 EMIT_NEW_PCONST (cfg, ins, handle);
12287 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12289 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12299 MONO_INST_NEW (cfg, ins, OP_THROW);
12301 ins->sreg1 = sp [0]->dreg;
12303 cfg->cbb->out_of_line = TRUE;
12304 MONO_ADD_INS (cfg->cbb, ins);
12305 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12306 MONO_ADD_INS (cfg->cbb, ins);
12309 link_bblock (cfg, cfg->cbb, end_bblock);
12310 start_new_bblock = 1;
12311 /* This can complicate code generation for llvm since the return value might not be defined */
12312 if (COMPILE_LLVM (cfg))
12313 INLINE_FAILURE ("throw");
12315 case CEE_ENDFINALLY:
12316 /* mono_save_seq_point_info () depends on this */
12317 if (sp != stack_start)
12318 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12319 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12320 MONO_ADD_INS (cfg->cbb, ins);
12322 start_new_bblock = 1;
12325 * Control will leave the method so empty the stack, otherwise
12326 * the next basic block will start with a nonempty stack.
12328 while (sp != stack_start) {
12333 case CEE_LEAVE_S: {
12336 if (*ip == CEE_LEAVE) {
12338 target = ip + 5 + (gint32)read32(ip + 1);
12341 target = ip + 2 + (signed char)(ip [1]);
12344 /* empty the stack */
12345 while (sp != stack_start) {
12350 * If this leave statement is in a catch block, check for a
12351 * pending exception, and rethrow it if necessary.
12352 * We avoid doing this in runtime invoke wrappers, since those are called
12353 * by native code which expects the wrapper to catch all exceptions.
12355 for (i = 0; i < header->num_clauses; ++i) {
12356 MonoExceptionClause *clause = &header->clauses [i];
12359 * Use <= in the final comparison to handle clauses with multiple
12360 * leave statements, like in bug #78024.
12361 * The ordering of the exception clauses guarantees that we find the
12362 * innermost clause.
12364 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12366 MonoBasicBlock *dont_throw;
12371 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12374 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12376 NEW_BBLOCK (cfg, dont_throw);
12379 * Currently, we always rethrow the abort exception, despite the
12380 * fact that this is not correct. See thread6.cs for an example.
12381 * But propagating the abort exception is more important than
12382 * getting the semantics right.
12384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12386 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12388 MONO_START_BB (cfg, dont_throw);
12393 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12396 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12398 MonoExceptionClause *clause;
12400 for (tmp = handlers; tmp; tmp = tmp->next) {
12401 clause = (MonoExceptionClause *)tmp->data;
12402 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12404 link_bblock (cfg, cfg->cbb, tblock);
12405 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12406 ins->inst_target_bb = tblock;
12407 ins->inst_eh_block = clause;
12408 MONO_ADD_INS (cfg->cbb, ins);
12409 cfg->cbb->has_call_handler = 1;
12410 if (COMPILE_LLVM (cfg)) {
12411 MonoBasicBlock *target_bb;
12414 * Link the finally bblock with the target, since it will
12415 * conceptually branch there.
12417 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12418 GET_BBLOCK (cfg, target_bb, target);
12419 link_bblock (cfg, tblock, target_bb);
12422 g_list_free (handlers);
12425 MONO_INST_NEW (cfg, ins, OP_BR);
12426 MONO_ADD_INS (cfg->cbb, ins);
12427 GET_BBLOCK (cfg, tblock, target);
12428 link_bblock (cfg, cfg->cbb, tblock);
12429 ins->inst_target_bb = tblock;
12431 start_new_bblock = 1;
12433 if (*ip == CEE_LEAVE)
12442 * Mono specific opcodes
12444 case MONO_CUSTOM_PREFIX: {
12446 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12450 case CEE_MONO_ICALL: {
12452 MonoJitICallInfo *info;
12454 token = read32 (ip + 2);
12455 func = mono_method_get_wrapper_data (method, token);
12456 info = mono_find_jit_icall_by_addr (func);
12458 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12461 CHECK_STACK (info->sig->param_count);
12462 sp -= info->sig->param_count;
12464 ins = mono_emit_jit_icall (cfg, info->func, sp);
12465 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12469 inline_costs += 10 * num_calls++;
12473 case CEE_MONO_LDPTR_CARD_TABLE:
12474 case CEE_MONO_LDPTR_NURSERY_START:
12475 case CEE_MONO_LDPTR_NURSERY_BITS:
12476 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12477 CHECK_STACK_OVF (1);
12480 case CEE_MONO_LDPTR_CARD_TABLE:
12481 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12483 case CEE_MONO_LDPTR_NURSERY_START:
12484 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12486 case CEE_MONO_LDPTR_NURSERY_BITS:
12487 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12489 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12490 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12496 inline_costs += 10 * num_calls++;
12499 case CEE_MONO_LDPTR: {
12502 CHECK_STACK_OVF (1);
12504 token = read32 (ip + 2);
12506 ptr = mono_method_get_wrapper_data (method, token);
12507 EMIT_NEW_PCONST (cfg, ins, ptr);
12510 inline_costs += 10 * num_calls++;
12511 /* Can't embed random pointers into AOT code */
12515 case CEE_MONO_JIT_ICALL_ADDR: {
12516 MonoJitICallInfo *callinfo;
12519 CHECK_STACK_OVF (1);
12521 token = read32 (ip + 2);
12523 ptr = mono_method_get_wrapper_data (method, token);
12524 callinfo = mono_find_jit_icall_by_addr (ptr);
12525 g_assert (callinfo);
12526 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12529 inline_costs += 10 * num_calls++;
12532 case CEE_MONO_ICALL_ADDR: {
12533 MonoMethod *cmethod;
12536 CHECK_STACK_OVF (1);
12538 token = read32 (ip + 2);
12540 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12542 if (cfg->compile_aot) {
12543 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12545 ptr = mono_lookup_internal_call (cmethod);
12547 EMIT_NEW_PCONST (cfg, ins, ptr);
12553 case CEE_MONO_VTADDR: {
12554 MonoInst *src_var, *src;
12560 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12561 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12566 case CEE_MONO_NEWOBJ: {
12567 MonoInst *iargs [2];
12569 CHECK_STACK_OVF (1);
12571 token = read32 (ip + 2);
12572 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12573 mono_class_init (klass);
12574 NEW_DOMAINCONST (cfg, iargs [0]);
12575 MONO_ADD_INS (cfg->cbb, iargs [0]);
12576 NEW_CLASSCONST (cfg, iargs [1], klass);
12577 MONO_ADD_INS (cfg->cbb, iargs [1]);
12578 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12580 inline_costs += 10 * num_calls++;
12583 case CEE_MONO_OBJADDR:
12586 MONO_INST_NEW (cfg, ins, OP_MOVE);
12587 ins->dreg = alloc_ireg_mp (cfg);
12588 ins->sreg1 = sp [0]->dreg;
12589 ins->type = STACK_MP;
12590 MONO_ADD_INS (cfg->cbb, ins);
12594 case CEE_MONO_LDNATIVEOBJ:
12596 * Similar to LDOBJ, but instead load the unmanaged
12597 * representation of the vtype to the stack.
12602 token = read32 (ip + 2);
12603 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12604 g_assert (klass->valuetype);
12605 mono_class_init (klass);
12608 MonoInst *src, *dest, *temp;
12611 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12612 temp->backend.is_pinvoke = 1;
12613 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12614 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12616 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12617 dest->type = STACK_VTYPE;
12618 dest->klass = klass;
12624 case CEE_MONO_RETOBJ: {
12626 * Same as RET, but return the native representation of a vtype
12629 g_assert (cfg->ret);
12630 g_assert (mono_method_signature (method)->pinvoke);
12635 token = read32 (ip + 2);
12636 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12638 if (!cfg->vret_addr) {
12639 g_assert (cfg->ret_var_is_local);
12641 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12643 EMIT_NEW_RETLOADA (cfg, ins);
12645 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12647 if (sp != stack_start)
12650 MONO_INST_NEW (cfg, ins, OP_BR);
12651 ins->inst_target_bb = end_bblock;
12652 MONO_ADD_INS (cfg->cbb, ins);
12653 link_bblock (cfg, cfg->cbb, end_bblock);
12654 start_new_bblock = 1;
12658 case CEE_MONO_CISINST:
12659 case CEE_MONO_CCASTCLASS: {
12664 token = read32 (ip + 2);
12665 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12666 if (ip [1] == CEE_MONO_CISINST)
12667 ins = handle_cisinst (cfg, klass, sp [0]);
12669 ins = handle_ccastclass (cfg, klass, sp [0]);
12674 case CEE_MONO_SAVE_LMF:
12675 case CEE_MONO_RESTORE_LMF:
12678 case CEE_MONO_CLASSCONST:
12679 CHECK_STACK_OVF (1);
12681 token = read32 (ip + 2);
12682 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12685 inline_costs += 10 * num_calls++;
12687 case CEE_MONO_NOT_TAKEN:
12688 cfg->cbb->out_of_line = TRUE;
12691 case CEE_MONO_TLS: {
12694 CHECK_STACK_OVF (1);
12696 key = (MonoTlsKey)read32 (ip + 2);
12697 g_assert (key < TLS_KEY_NUM);
12699 ins = mono_create_tls_get (cfg, key);
12701 if (cfg->compile_aot) {
12703 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12704 ins->dreg = alloc_preg (cfg);
12705 ins->type = STACK_PTR;
12707 g_assert_not_reached ();
12710 ins->type = STACK_PTR;
12711 MONO_ADD_INS (cfg->cbb, ins);
12716 case CEE_MONO_DYN_CALL: {
12717 MonoCallInst *call;
12719 /* It would be easier to call a trampoline, but that would put an
12720 * extra frame on the stack, confusing exception handling. So
12721 * implement it inline using an opcode for now.
12724 if (!cfg->dyn_call_var) {
12725 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12726 /* prevent it from being register allocated */
12727 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12730 /* Has to use a call inst since the local regalloc expects it */
12731 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12732 ins = (MonoInst*)call;
12734 ins->sreg1 = sp [0]->dreg;
12735 ins->sreg2 = sp [1]->dreg;
12736 MONO_ADD_INS (cfg->cbb, ins);
12738 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12741 inline_costs += 10 * num_calls++;
12745 case CEE_MONO_MEMORY_BARRIER: {
12747 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12751 case CEE_MONO_JIT_ATTACH: {
12752 MonoInst *args [16], *domain_ins;
12753 MonoInst *ad_ins, *jit_tls_ins;
12754 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12756 cfg->attach_cookie = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12757 cfg->attach_dummy = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12759 if (mono_threads_is_coop_enabled ()) {
12760 /* AOT code is only used in the root domain */
12761 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12762 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12763 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12764 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12766 EMIT_NEW_PCONST (cfg, ins, NULL);
12767 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12769 ad_ins = mono_get_domain_intrinsic (cfg);
12770 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12772 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12773 NEW_BBLOCK (cfg, next_bb);
12774 NEW_BBLOCK (cfg, call_bb);
12776 if (cfg->compile_aot) {
12777 /* AOT code is only used in the root domain */
12778 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12780 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12782 MONO_ADD_INS (cfg->cbb, ad_ins);
12783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12784 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12786 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12791 MONO_START_BB (cfg, call_bb);
12794 /* AOT code is only used in the root domain */
12795 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12796 EMIT_NEW_PCONST (cfg, args [1], NULL);
12797 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12798 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->attach_cookie->dreg, ins->dreg);
12801 MONO_START_BB (cfg, next_bb);
12807 case CEE_MONO_JIT_DETACH: {
12808 MonoInst *args [16];
12810 /* Restore the original domain */
12811 dreg = alloc_ireg (cfg);
12812 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->attach_cookie->dreg);
12813 EMIT_NEW_VARLOADA (cfg, args [1], cfg->attach_dummy, cfg->attach_dummy->inst_vtype);
12814 mono_emit_jit_icall (cfg, mono_jit_thread_detach, args);
12818 case CEE_MONO_CALLI_EXTRA_ARG: {
12820 MonoMethodSignature *fsig;
12824 * This is the same as CEE_CALLI, but passes an additional argument
12825 * to the called method in llvmonly mode.
12826 * This is only used by delegate invoke wrappers to call the
12827 * actual delegate method.
12829 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12832 token = read32 (ip + 2);
12840 fsig = mini_get_signature (method, token, generic_context);
12842 if (cfg->llvm_only)
12843 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12845 n = fsig->param_count + fsig->hasthis + 1;
12852 if (cfg->llvm_only) {
12854 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12855 * cconv. This is set by mono_init_delegate ().
12857 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12858 MonoInst *callee = addr;
12859 MonoInst *call, *localloc_ins;
12860 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12861 int low_bit_reg = alloc_preg (cfg);
12863 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12864 NEW_BBLOCK (cfg, end_bb);
12866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12868 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12870 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12871 addr = emit_get_rgctx_sig (cfg, context_used,
12872 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12874 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12876 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12877 ins->dreg = alloc_preg (cfg);
12878 ins->inst_imm = 2 * SIZEOF_VOID_P;
12879 MONO_ADD_INS (cfg->cbb, ins);
12880 localloc_ins = ins;
12881 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12885 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12886 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12888 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12889 MONO_START_BB (cfg, is_gsharedvt_bb);
12890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12891 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12892 ins->dreg = call->dreg;
12894 MONO_START_BB (cfg, end_bb);
12896 /* Caller uses a normal calling conv */
12898 MonoInst *callee = addr;
12899 MonoInst *call, *localloc_ins;
12900 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12901 int low_bit_reg = alloc_preg (cfg);
12903 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12904 NEW_BBLOCK (cfg, end_bb);
12906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12910 /* Normal case: callee uses a normal cconv, no conversion is needed */
12911 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12913 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12914 MONO_START_BB (cfg, is_gsharedvt_bb);
12915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12916 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12917 MONO_ADD_INS (cfg->cbb, addr);
12919 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12921 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12922 ins->dreg = alloc_preg (cfg);
12923 ins->inst_imm = 2 * SIZEOF_VOID_P;
12924 MONO_ADD_INS (cfg->cbb, ins);
12925 localloc_ins = ins;
12926 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12930 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12931 ins->dreg = call->dreg;
12932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12934 MONO_START_BB (cfg, end_bb);
12937 /* Same as CEE_CALLI */
12938 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12940 * We pass the address to the gsharedvt trampoline in the rgctx reg
12942 MonoInst *callee = addr;
12944 addr = emit_get_rgctx_sig (cfg, context_used,
12945 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12946 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12948 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12952 if (!MONO_TYPE_IS_VOID (fsig->ret))
12953 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12955 CHECK_CFG_EXCEPTION;
12959 constrained_class = NULL;
12963 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12969 case CEE_PREFIX1: {
12972 case CEE_ARGLIST: {
12973 /* somewhat similar to LDTOKEN */
12974 MonoInst *addr, *vtvar;
12975 CHECK_STACK_OVF (1);
12976 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12978 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12979 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12981 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12982 ins->type = STACK_VTYPE;
12983 ins->klass = mono_defaults.argumenthandle_class;
12993 MonoInst *cmp, *arg1, *arg2;
13001 * The following transforms:
13002 * CEE_CEQ into OP_CEQ
13003 * CEE_CGT into OP_CGT
13004 * CEE_CGT_UN into OP_CGT_UN
13005 * CEE_CLT into OP_CLT
13006 * CEE_CLT_UN into OP_CLT_UN
13008 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13010 MONO_INST_NEW (cfg, ins, cmp->opcode);
13011 cmp->sreg1 = arg1->dreg;
13012 cmp->sreg2 = arg2->dreg;
13013 type_from_op (cfg, cmp, arg1, arg2);
13015 add_widen_op (cfg, cmp, &arg1, &arg2);
13016 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13017 cmp->opcode = OP_LCOMPARE;
13018 else if (arg1->type == STACK_R4)
13019 cmp->opcode = OP_RCOMPARE;
13020 else if (arg1->type == STACK_R8)
13021 cmp->opcode = OP_FCOMPARE;
13023 cmp->opcode = OP_ICOMPARE;
13024 MONO_ADD_INS (cfg->cbb, cmp);
13025 ins->type = STACK_I4;
13026 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13027 type_from_op (cfg, ins, arg1, arg2);
13029 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13031 * The backends expect the fceq opcodes to do the
13034 ins->sreg1 = cmp->sreg1;
13035 ins->sreg2 = cmp->sreg2;
13038 MONO_ADD_INS (cfg->cbb, ins);
13044 MonoInst *argconst;
13045 MonoMethod *cil_method;
13047 CHECK_STACK_OVF (1);
13049 n = read32 (ip + 2);
13050 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13053 mono_class_init (cmethod->klass);
13055 mono_save_token_info (cfg, image, n, cmethod);
13057 context_used = mini_method_check_context_used (cfg, cmethod);
13059 cil_method = cmethod;
13060 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13061 METHOD_ACCESS_FAILURE (method, cil_method);
13063 if (mono_security_core_clr_enabled ())
13064 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13067 * Optimize the common case of ldftn+delegate creation
13069 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13070 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13071 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13072 MonoInst *target_ins, *handle_ins;
13073 MonoMethod *invoke;
13074 int invoke_context_used;
13076 invoke = mono_get_delegate_invoke (ctor_method->klass);
13077 if (!invoke || !mono_method_signature (invoke))
13080 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13082 target_ins = sp [-1];
13084 if (mono_security_core_clr_enabled ())
13085 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13087 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13088 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13089 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13091 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13095 /* FIXME: SGEN support */
13096 if (invoke_context_used == 0 || cfg->llvm_only) {
13098 if (cfg->verbose_level > 3)
13099 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13100 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13103 CHECK_CFG_EXCEPTION;
13113 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13114 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13118 inline_costs += 10 * num_calls++;
13121 case CEE_LDVIRTFTN: {
13122 MonoInst *args [2];
13126 n = read32 (ip + 2);
13127 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13130 mono_class_init (cmethod->klass);
13132 context_used = mini_method_check_context_used (cfg, cmethod);
13134 if (mono_security_core_clr_enabled ())
13135 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13138 * Optimize the common case of ldvirtftn+delegate creation
13140 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13141 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13142 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13143 MonoInst *target_ins, *handle_ins;
13144 MonoMethod *invoke;
13145 int invoke_context_used;
13146 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13148 invoke = mono_get_delegate_invoke (ctor_method->klass);
13149 if (!invoke || !mono_method_signature (invoke))
13152 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13154 target_ins = sp [-1];
13156 if (mono_security_core_clr_enabled ())
13157 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13159 /* FIXME: SGEN support */
13160 if (invoke_context_used == 0 || cfg->llvm_only) {
13162 if (cfg->verbose_level > 3)
13163 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13164 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13167 CHECK_CFG_EXCEPTION;
13180 args [1] = emit_get_rgctx_method (cfg, context_used,
13181 cmethod, MONO_RGCTX_INFO_METHOD);
13184 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13186 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13189 inline_costs += 10 * num_calls++;
13193 CHECK_STACK_OVF (1);
13195 n = read16 (ip + 2);
13197 EMIT_NEW_ARGLOAD (cfg, ins, n);
13202 CHECK_STACK_OVF (1);
13204 n = read16 (ip + 2);
13206 NEW_ARGLOADA (cfg, ins, n);
13207 MONO_ADD_INS (cfg->cbb, ins);
13215 n = read16 (ip + 2);
13217 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13219 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13223 CHECK_STACK_OVF (1);
13225 n = read16 (ip + 2);
13227 EMIT_NEW_LOCLOAD (cfg, ins, n);
13232 unsigned char *tmp_ip;
13233 CHECK_STACK_OVF (1);
13235 n = read16 (ip + 2);
13238 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13244 EMIT_NEW_LOCLOADA (cfg, ins, n);
13253 n = read16 (ip + 2);
13255 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13257 emit_stloc_ir (cfg, sp, header, n);
13264 if (sp != stack_start)
13266 if (cfg->method != method)
13268 * Inlining this into a loop in a parent could lead to
13269 * stack overflows which is different behavior than the
13270 * non-inlined case, thus disable inlining in this case.
13272 INLINE_FAILURE("localloc");
13274 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13275 ins->dreg = alloc_preg (cfg);
13276 ins->sreg1 = sp [0]->dreg;
13277 ins->type = STACK_PTR;
13278 MONO_ADD_INS (cfg->cbb, ins);
13280 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13282 ins->flags |= MONO_INST_INIT;
13287 case CEE_ENDFILTER: {
13288 MonoExceptionClause *clause, *nearest;
13293 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13295 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13296 ins->sreg1 = (*sp)->dreg;
13297 MONO_ADD_INS (cfg->cbb, ins);
13298 start_new_bblock = 1;
13302 for (cc = 0; cc < header->num_clauses; ++cc) {
13303 clause = &header->clauses [cc];
13304 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13305 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13306 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13309 g_assert (nearest);
13310 if ((ip - header->code) != nearest->handler_offset)
13315 case CEE_UNALIGNED_:
13316 ins_flag |= MONO_INST_UNALIGNED;
13317 /* FIXME: record alignment? we can assume 1 for now */
13321 case CEE_VOLATILE_:
13322 ins_flag |= MONO_INST_VOLATILE;
13326 ins_flag |= MONO_INST_TAILCALL;
13327 cfg->flags |= MONO_CFG_HAS_TAIL;
13328 /* Can't inline tail calls at this time */
13329 inline_costs += 100000;
13336 token = read32 (ip + 2);
13337 klass = mini_get_class (method, token, generic_context);
13338 CHECK_TYPELOAD (klass);
13339 if (generic_class_is_reference_type (cfg, klass))
13340 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13342 mini_emit_initobj (cfg, *sp, NULL, klass);
13346 case CEE_CONSTRAINED_:
13348 token = read32 (ip + 2);
13349 constrained_class = mini_get_class (method, token, generic_context);
13350 CHECK_TYPELOAD (constrained_class);
13354 case CEE_INITBLK: {
13355 MonoInst *iargs [3];
13359 /* Skip optimized paths for volatile operations. */
13360 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13361 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13362 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13363 /* emit_memset only works when val == 0 */
13364 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13367 iargs [0] = sp [0];
13368 iargs [1] = sp [1];
13369 iargs [2] = sp [2];
13370 if (ip [1] == CEE_CPBLK) {
13372 * FIXME: It's unclear whether we should be emitting both the acquire
13373 * and release barriers for cpblk. It is technically both a load and
13374 * store operation, so it seems like that's the sensible thing to do.
13376 * FIXME: We emit full barriers on both sides of the operation for
13377 * simplicity. We should have a separate atomic memcpy method instead.
13379 MonoMethod *memcpy_method = get_memcpy_method ();
13381 if (ins_flag & MONO_INST_VOLATILE)
13382 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13384 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13385 call->flags |= ins_flag;
13387 if (ins_flag & MONO_INST_VOLATILE)
13388 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13390 MonoMethod *memset_method = get_memset_method ();
13391 if (ins_flag & MONO_INST_VOLATILE) {
13392 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13393 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13395 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13396 call->flags |= ins_flag;
13407 ins_flag |= MONO_INST_NOTYPECHECK;
13409 ins_flag |= MONO_INST_NORANGECHECK;
13410 /* we ignore the no-nullcheck for now since we
13411 * really do it explicitly only when doing callvirt->call
13415 case CEE_RETHROW: {
13417 int handler_offset = -1;
13419 for (i = 0; i < header->num_clauses; ++i) {
13420 MonoExceptionClause *clause = &header->clauses [i];
13421 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13422 handler_offset = clause->handler_offset;
13427 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13429 if (handler_offset == -1)
13432 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13433 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13434 ins->sreg1 = load->dreg;
13435 MONO_ADD_INS (cfg->cbb, ins);
13437 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13438 MONO_ADD_INS (cfg->cbb, ins);
13441 link_bblock (cfg, cfg->cbb, end_bblock);
13442 start_new_bblock = 1;
13450 CHECK_STACK_OVF (1);
13452 token = read32 (ip + 2);
13453 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13454 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13457 val = mono_type_size (type, &ialign);
13459 MonoClass *klass = mini_get_class (method, token, generic_context);
13460 CHECK_TYPELOAD (klass);
13462 val = mono_type_size (&klass->byval_arg, &ialign);
13464 if (mini_is_gsharedvt_klass (klass))
13465 GSHAREDVT_FAILURE (*ip);
13467 EMIT_NEW_ICONST (cfg, ins, val);
13472 case CEE_REFANYTYPE: {
13473 MonoInst *src_var, *src;
13475 GSHAREDVT_FAILURE (*ip);
13481 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13483 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13484 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13485 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13490 case CEE_READONLY_:
13503 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13513 g_warning ("opcode 0x%02x not handled", *ip);
13517 if (start_new_bblock != 1)
13520 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13521 if (cfg->cbb->next_bb) {
13522 /* This could already be set because of inlining, #693905 */
13523 MonoBasicBlock *bb = cfg->cbb;
13525 while (bb->next_bb)
13527 bb->next_bb = end_bblock;
13529 cfg->cbb->next_bb = end_bblock;
13532 if (cfg->method == method && cfg->domainvar) {
13534 MonoInst *get_domain;
13536 cfg->cbb = init_localsbb;
13538 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13539 MONO_ADD_INS (cfg->cbb, get_domain);
13541 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13543 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13544 MONO_ADD_INS (cfg->cbb, store);
13547 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13548 if (cfg->compile_aot)
13549 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13550 mono_get_got_var (cfg);
13553 if (cfg->method == method && cfg->got_var)
13554 mono_emit_load_got_addr (cfg);
13556 if (init_localsbb) {
13557 cfg->cbb = init_localsbb;
13559 for (i = 0; i < header->num_locals; ++i) {
13560 emit_init_local (cfg, i, header->locals [i], init_locals);
13564 if (cfg->init_ref_vars && cfg->method == method) {
13565 /* Emit initialization for ref vars */
13566 // FIXME: Avoid duplication initialization for IL locals.
13567 for (i = 0; i < cfg->num_varinfo; ++i) {
13568 MonoInst *ins = cfg->varinfo [i];
13570 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13571 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13575 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13576 cfg->cbb = init_localsbb;
13577 emit_push_lmf (cfg);
13580 cfg->cbb = init_localsbb;
13581 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13584 MonoBasicBlock *bb;
13587 * Make seq points at backward branch targets interruptable.
13589 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13590 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13591 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13594 /* Add a sequence point for method entry/exit events */
13595 if (seq_points && cfg->gen_sdb_seq_points) {
13596 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13597 MONO_ADD_INS (init_localsbb, ins);
13598 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13599 MONO_ADD_INS (cfg->bb_exit, ins);
13603 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13604 * the code they refer to was dead (#11880).
13606 if (sym_seq_points) {
13607 for (i = 0; i < header->code_size; ++i) {
13608 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13611 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13612 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13619 if (cfg->method == method) {
13620 MonoBasicBlock *bb;
13621 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13622 bb->region = mono_find_block_region (cfg, bb->real_offset);
13624 mono_create_spvar_for_region (cfg, bb->region);
13625 if (cfg->verbose_level > 2)
13626 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13630 if (inline_costs < 0) {
13633 /* Method is too large */
13634 mname = mono_method_full_name (method, TRUE);
13635 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13639 if ((cfg->verbose_level > 2) && (cfg->method == method))
13640 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13645 g_assert (!mono_error_ok (&cfg->error));
13649 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13653 set_exception_type_from_invalid_il (cfg, method, ip);
13657 g_slist_free (class_inits);
13658 mono_basic_block_free (original_bb);
13659 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13660 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13661 if (cfg->exception_type)
13664 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * so a register store whose source is a known constant can be rewritten
 * as an immediate store.
 */
13668 store_membase_reg_to_store_membase_imm (int opcode)
13671 case OP_STORE_MEMBASE_REG:
13672 return OP_STORE_MEMBASE_IMM;
13673 case OP_STOREI1_MEMBASE_REG:
13674 return OP_STOREI1_MEMBASE_IMM;
13675 case OP_STOREI2_MEMBASE_REG:
13676 return OP_STOREI2_MEMBASE_IMM;
13677 case OP_STOREI4_MEMBASE_REG:
13678 return OP_STOREI4_MEMBASE_IMM;
13679 case OP_STOREI8_MEMBASE_REG:
13680 return OP_STOREI8_MEMBASE_IMM;
/* presumably the default case: callers must only pass mappable store opcodes */
13682 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate operand
 * (e.g. the integer/long ALU ops map to their *_IMM forms). Also covers
 * compares, register stores, and some x86/amd64-specific opcodes.
 */
13689 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
13693 return OP_IADD_IMM;
13695 return OP_ISUB_IMM;
13697 return OP_IDIV_IMM;
13699 return OP_IDIV_UN_IMM;
13701 return OP_IREM_IMM;
13703 return OP_IREM_UN_IMM;
13705 return OP_IMUL_IMM;
13707 return OP_IAND_IMM;
13711 return OP_IXOR_IMM;
13713 return OP_ISHL_IMM;
13715 return OP_ISHR_IMM;
13717 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU ops */
13720 return OP_LADD_IMM;
13722 return OP_LSUB_IMM;
13724 return OP_LAND_IMM;
13728 return OP_LXOR_IMM;
13730 return OP_LSHL_IMM;
13732 return OP_LSHR_IMM;
13734 return OP_LSHR_UN_IMM;
/* long rem-by-immediate only exists natively on 64 bit registers */
13735 #if SIZEOF_REGISTER == 8
13737 return OP_LREM_IMM;
/* compares */
13741 return OP_COMPARE_IMM;
13743 return OP_ICOMPARE_IMM;
13745 return OP_LCOMPARE_IMM;
/* register stores -> immediate stores */
13747 case OP_STORE_MEMBASE_REG:
13748 return OP_STORE_MEMBASE_IMM;
13749 case OP_STOREI1_MEMBASE_REG:
13750 return OP_STOREI1_MEMBASE_IMM;
13751 case OP_STOREI2_MEMBASE_REG:
13752 return OP_STOREI2_MEMBASE_IMM;
13753 case OP_STOREI4_MEMBASE_REG:
13754 return OP_STOREI4_MEMBASE_IMM;
/* target-specific immediate forms */
13756 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13758 return OP_X86_PUSH_IMM;
13759 case OP_X86_COMPARE_MEMBASE_REG:
13760 return OP_X86_COMPARE_MEMBASE_IMM;
13762 #if defined(TARGET_AMD64)
13763 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13764 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13766 case OP_VOIDCALL_REG:
13767 return OP_VOIDCALL;
13775 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * JIT OP_LOAD*_MEMBASE opcode.
 */
13782 ldind_to_load_membase (int opcode)
13786 return OP_LOADI1_MEMBASE;
13788 return OP_LOADU1_MEMBASE;
13790 return OP_LOADI2_MEMBASE;
13792 return OP_LOADU2_MEMBASE;
13794 return OP_LOADI4_MEMBASE;
13796 return OP_LOADU4_MEMBASE;
13798 return OP_LOAD_MEMBASE;
/* object references load as a full pointer-sized word */
13799 case CEE_LDIND_REF:
13800 return OP_LOAD_MEMBASE;
13802 return OP_LOADI8_MEMBASE;
13804 return OP_LOADR4_MEMBASE;
13806 return OP_LOADR8_MEMBASE;
/* presumably the default case: unknown ldind opcodes are a bug in the caller */
13808 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * JIT OP_STORE*_MEMBASE_REG opcode.
 */
13815 stind_to_store_membase (int opcode)
13819 return OP_STOREI1_MEMBASE_REG;
13821 return OP_STOREI2_MEMBASE_REG;
13823 return OP_STOREI4_MEMBASE_REG;
/* object references store as a full pointer-sized word */
13825 case CEE_STIND_REF:
13826 return OP_STORE_MEMBASE_REG;
13828 return OP_STOREI8_MEMBASE_REG;
13830 return OP_STORER4_MEMBASE_REG;
13832 return OP_STORER8_MEMBASE_REG;
/* presumably the default case: unknown stind opcodes are a bug in the caller */
13834 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert a base+offset load (OP_*_MEMBASE) into the corresponding
 * absolute-address load (OP_*_MEM). Only implemented on x86/amd64;
 * see the FIXME below about adding a proper arch capability macro.
 */
13841 mono_load_membase_to_load_mem (int opcode)
13843 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13844 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13846 case OP_LOAD_MEMBASE:
13847 return OP_LOAD_MEM;
13848 case OP_LOADU1_MEMBASE:
13849 return OP_LOADU1_MEM;
13850 case OP_LOADU2_MEMBASE:
13851 return OP_LOADU2_MEM;
13852 case OP_LOADI4_MEMBASE:
13853 return OP_LOADI4_MEM;
13854 case OP_LOADU4_MEMBASE:
13855 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit registers */
13856 #if SIZEOF_REGISTER == 8
13857 case OP_LOADI8_MEMBASE:
13858 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return a variant of OPCODE which operates directly on a memory
 * destination (x86/amd64 read-modify-write *_MEMBASE forms), given that
 * the result would otherwise be written back with STORE_OPCODE.
 * Used to fuse an ALU op + store-to-variable into a single instruction.
 */
13867 op_to_op_dest_membase (int store_opcode, int opcode)
13869 #if defined(TARGET_X86)
/* on x86 only plain word / 4 byte stores can be fused */
13870 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13875 return OP_X86_ADD_MEMBASE_REG;
13877 return OP_X86_SUB_MEMBASE_REG;
13879 return OP_X86_AND_MEMBASE_REG;
13881 return OP_X86_OR_MEMBASE_REG;
13883 return OP_X86_XOR_MEMBASE_REG;
13886 return OP_X86_ADD_MEMBASE_IMM;
13889 return OP_X86_SUB_MEMBASE_IMM;
13892 return OP_X86_AND_MEMBASE_IMM;
13895 return OP_X86_OR_MEMBASE_IMM;
13898 return OP_X86_XOR_MEMBASE_IMM;
13904 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8 byte stores to be fused */
13905 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit forms */
13910 return OP_X86_ADD_MEMBASE_REG;
13912 return OP_X86_SUB_MEMBASE_REG;
13914 return OP_X86_AND_MEMBASE_REG;
13916 return OP_X86_OR_MEMBASE_REG;
13918 return OP_X86_XOR_MEMBASE_REG;
13920 return OP_X86_ADD_MEMBASE_IMM;
13922 return OP_X86_SUB_MEMBASE_IMM;
13924 return OP_X86_AND_MEMBASE_IMM;
13926 return OP_X86_OR_MEMBASE_IMM;
13928 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms */
13930 return OP_AMD64_ADD_MEMBASE_REG;
13932 return OP_AMD64_SUB_MEMBASE_REG;
13934 return OP_AMD64_AND_MEMBASE_REG;
13936 return OP_AMD64_OR_MEMBASE_REG;
13938 return OP_AMD64_XOR_MEMBASE_REG;
13941 return OP_AMD64_ADD_MEMBASE_IMM;
13944 return OP_AMD64_SUB_MEMBASE_IMM;
13947 return OP_AMD64_AND_MEMBASE_IMM;
13950 return OP_AMD64_OR_MEMBASE_IMM;
13953 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Map a compare-result opcode to an x86/amd64 SETcc-to-memory form,
 * applicable when the 1-byte result would be stored with
 * OP_STOREI1_MEMBASE_REG; fuses the flag materialization and the store.
 */
13963 op_to_op_store_membase (int store_opcode, int opcode)
13965 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13968 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13969 return OP_X86_SETEQ_MEMBASE;
13971 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13972 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which reads its first source operand
 * directly from memory, folding the load performed by LOAD_OPCODE into
 * the instruction itself (x86/amd64 only).
 */
13980 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13983 /* FIXME: This has sign extension issues */
13985 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13986 return OP_X86_COMPARE_MEMBASE8_IMM;
/* other fusions require a word-sized (4 byte) load */
13989 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13994 return OP_X86_PUSH_MEMBASE;
13995 case OP_COMPARE_IMM:
13996 case OP_ICOMPARE_IMM:
13997 return OP_X86_COMPARE_MEMBASE_IMM;
14000 return OP_X86_COMPARE_MEMBASE_REG;
14004 #ifdef TARGET_AMD64
14005 /* FIXME: This has sign extension issues */
14007 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14008 return OP_X86_COMPARE_MEMBASE8_IMM;
/* a pointer-sized load can be pushed directly; under ILP32 OP_LOAD_MEMBASE is only 4 bytes */
14013 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14014 return OP_X86_PUSH_MEMBASE;
14016 /* FIXME: This only works for 32 bit immediates
14017 case OP_COMPARE_IMM:
14018 case OP_LCOMPARE_IMM:
14019 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14020 return OP_AMD64_COMPARE_MEMBASE_IMM;
14022 case OP_ICOMPARE_IMM:
14023 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14024 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* pick the 32 vs 64 bit compare depending on the width of the loaded value */
14028 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14029 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14030 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14031 return OP_AMD64_COMPARE_MEMBASE_REG;
14034 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14035 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return a variant of OPCODE which reads its second source operand
 * directly from memory, folding the load performed by LOAD_OPCODE into
 * the instruction itself (x86/amd64 only).
 */
14044 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
14047 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14053 return OP_X86_COMPARE_REG_MEMBASE;
14055 return OP_X86_ADD_REG_MEMBASE;
14057 return OP_X86_SUB_REG_MEMBASE;
14059 return OP_X86_AND_REG_MEMBASE;
14061 return OP_X86_OR_REG_MEMBASE;
14063 return OP_X86_XOR_REG_MEMBASE;
14067 #ifdef TARGET_AMD64
/* 32 bit loads (incl. OP_LOAD_MEMBASE under ILP32) use the 32 bit forms */
14068 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14071 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14073 return OP_X86_ADD_REG_MEMBASE;
14075 return OP_X86_SUB_REG_MEMBASE;
14077 return OP_X86_AND_REG_MEMBASE;
14079 return OP_X86_OR_REG_MEMBASE;
14081 return OP_X86_XOR_REG_MEMBASE;
/* 8 byte loads use the 64 bit forms */
14083 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14087 return OP_AMD64_COMPARE_REG_MEMBASE;
14089 return OP_AMD64_ADD_REG_MEMBASE;
14091 return OP_AMD64_SUB_REG_MEMBASE;
14093 return OP_AMD64_AND_REG_MEMBASE;
14095 return OP_AMD64_OR_REG_MEMBASE;
14097 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to map opcodes whose immediate
 * variants are emulated in software on this target (long shifts,
 * mul/div/rem), as selected by the MONO_ARCH_EMULATE_* defines.
 */
14106 mono_op_to_op_imm_noemul (int opcode)
14109 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14115 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14122 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else can be mapped normally */
14127 return mono_op_to_op_imm (opcode);
14132 * mono_handle_global_vregs:
14134 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass over the whole CFG: vregs referenced from more than one basic block
 * are made 'global' (a variable is created for them via
 * mono_compile_create_var_for_vreg ()), variables used in only one bblock
 * are demoted back to local vregs, and finally the varinfo/vars tables
 * are compacted to drop the dead entries.
 */
14138 mono_handle_global_vregs (MonoCompile *cfg)
14140 gint32 *vreg_to_bb;
14141 MonoBasicBlock *bb;
/* vreg -> (block_num + 1) of the single bblock using it, or -1 if used in several */
14144 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14146 #ifdef MONO_ARCH_SIMD_INTRINSICS
14147 if (cfg->uses_simd_intrinsics)
14148 mono_simd_simplify_indirection (cfg);
14151 /* Find local vregs used in more than one bb */
14152 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14153 MonoInst *ins = bb->code;
14154 int block_num = bb->block_num;
14156 if (cfg->verbose_level > 2)
14157 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14160 for (; ins; ins = ins->next) {
14161 const char *spec = INS_INFO (ins->opcode);
14162 int regtype = 0, regindex;
14165 if (G_UNLIKELY (cfg->verbose_level > 2))
14166 mono_print_ins (ins);
/* CIL opcodes must have been lowered to machine IR by this point */
14168 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine the dreg and up to three sregs of the instruction */
14170 for (regindex = 0; regindex < 4; regindex ++) {
14173 if (regindex == 0) {
14174 regtype = spec [MONO_INST_DEST];
14175 if (regtype == ' ')
14178 } else if (regindex == 1) {
14179 regtype = spec [MONO_INST_SRC1];
14180 if (regtype == ' ')
14183 } else if (regindex == 2) {
14184 regtype = spec [MONO_INST_SRC2];
14185 if (regtype == ' ')
14188 } else if (regindex == 3) {
14189 regtype = spec [MONO_INST_SRC3];
14190 if (regtype == ' ')
14195 #if SIZEOF_REGISTER == 4
14196 /* In the LLVM case, the long opcodes are not decomposed */
14197 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14199 * Since some instructions reference the original long vreg,
14200 * and some reference the two component vregs, it is quite hard
14201 * to determine when it needs to be global. So be conservative.
14203 if (!get_vreg_to_inst (cfg, vreg)) {
14204 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14206 if (cfg->verbose_level > 2)
14207 printf ("LONG VREG R%d made global.\n", vreg);
14211 * Make the component vregs volatile since the optimizations can
14212 * get confused otherwise.
14214 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14215 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14219 g_assert (vreg != -1);
14221 prev_bb = vreg_to_bb [vreg];
14222 if (prev_bb == 0) {
14223 /* 0 is a valid block num */
14224 vreg_to_bb [vreg] = block_num + 1;
14225 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hregs below MONO_MAX_IREGS/FREGS are hardware registers, never globalized */
14226 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14229 if (!get_vreg_to_inst (cfg, vreg)) {
14230 if (G_UNLIKELY (cfg->verbose_level > 2))
14231 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create the variable with a type matching the reg class */
14235 if (vreg_is_ref (cfg, vreg))
14236 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14238 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14241 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14244 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14247 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14250 g_assert_not_reached ();
14254 /* Flag as having been used in more than one bb */
14255 vreg_to_bb [vreg] = -1;
14261 /* If a variable is used in only one bblock, convert it into a local vreg */
14262 for (i = 0; i < cfg->num_varinfo; i++) {
14263 MonoInst *var = cfg->varinfo [i];
14264 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14266 switch (var->type) {
14272 #if SIZEOF_REGISTER == 8
14275 #if !defined(TARGET_X86)
14276 /* Enabling this screws up the fp stack on x86 */
14279 if (mono_arch_is_soft_float ())
14283 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14287 /* Arguments are implicitly global */
14288 /* Putting R4 vars into registers doesn't work currently */
14289 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14290 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14292 * Make that the variable's liveness interval doesn't contain a call, since
14293 * that would cause the lvreg to be spilled, making the whole optimization
14296 /* This is too slow for JIT compilation */
14298 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14300 int def_index, call_index, ins_index;
14301 gboolean spilled = FALSE;
14306 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14307 const char *spec = INS_INFO (ins->opcode);
14309 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14310 def_index = ins_index;
/*
 * NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * clause is a duplicate; it should almost certainly test
 * spec [MONO_INST_SRC2] / ins->sreg2, otherwise uses through the
 * second source operand are missed. Verify against upstream.
 */
14312 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14313 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14314 if (call_index > def_index) {
14320 if (MONO_IS_CALL (ins))
14321 call_index = ins_index;
14331 if (G_UNLIKELY (cfg->verbose_level > 2))
14332 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14333 var->flags |= MONO_INST_IS_DEAD;
14334 cfg->vreg_to_inst [var->dreg] = NULL;
14341 * Compress the varinfo and vars tables so the liveness computation is faster and
14342 * takes up less space.
14345 for (i = 0; i < cfg->num_varinfo; ++i) {
14346 MonoInst *var = cfg->varinfo [i];
14347 if (pos < i && cfg->locals_start == i)
14348 cfg->locals_start = pos;
14349 if (!(var->flags & MONO_INST_IS_DEAD)) {
14351 cfg->varinfo [pos] = cfg->varinfo [i];
14352 cfg->varinfo [pos]->inst_c0 = pos;
14353 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14354 cfg->vars [pos].idx = pos;
14355 #if SIZEOF_REGISTER == 4
14356 if (cfg->varinfo [pos]->type == STACK_I8) {
14357 /* Modify the two component vars too */
14360 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14361 var1->inst_c0 = pos;
14362 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14363 var1->inst_c0 = pos;
14370 cfg->num_varinfo = pos;
14371 if (cfg->locals_start > cfg->num_varinfo)
14372 cfg->locals_start = cfg->num_varinfo;
14376 * mono_allocate_gsharedvt_vars:
14378 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14379 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * Walk all variables: gsharedvt-typed locals get a slot index in the
 * MonoGSharedVtMethodRuntimeInfo.entries array (stored +1 in
 * cfg->gsharedvt_vreg_to_idx, 0 meaning "no slot"), while gsharedvt
 * arguments are marked with -1 and become OP_GSHAREDVT_ARG_REGOFFSET.
 */
14382 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14386 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14388 for (i = 0; i < cfg->num_varinfo; ++i) {
14389 MonoInst *ins = cfg->varinfo [i];
14392 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* entries at/after locals_start are locals; the rest are presumably arguments */
14393 if (i >= cfg->locals_start) {
14395 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14396 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14397 ins->opcode = OP_GSHAREDVT_LOCAL;
14398 ins->inst_imm = idx;
14401 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14402 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14409 * mono_spill_global_vars:
14411 * Generate spill code for variables which are not allocated to registers,
14412 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14413 * code is generated which could be optimized by the local optimization passes.
14416 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14418 MonoBasicBlock *bb;
14420 int orig_next_vreg;
14421 guint32 *vreg_to_lvreg;
14423 guint32 i, lvregs_len;
14424 gboolean dest_has_lvreg = FALSE;
14425 MonoStackType stacktypes [128];
14426 MonoInst **live_range_start, **live_range_end;
14427 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14429 *need_local_opts = FALSE;
14431 memset (spec2, 0, sizeof (spec2));
14433 /* FIXME: Move this function to mini.c */
14434 stacktypes ['i'] = STACK_PTR;
14435 stacktypes ['l'] = STACK_I8;
14436 stacktypes ['f'] = STACK_R8;
14437 #ifdef MONO_ARCH_SIMD_INTRINSICS
14438 stacktypes ['x'] = STACK_VTYPE;
14441 #if SIZEOF_REGISTER == 4
14442 /* Create MonoInsts for longs */
14443 for (i = 0; i < cfg->num_varinfo; i++) {
14444 MonoInst *ins = cfg->varinfo [i];
14446 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14447 switch (ins->type) {
14452 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14455 g_assert (ins->opcode == OP_REGOFFSET);
14457 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14459 tree->opcode = OP_REGOFFSET;
14460 tree->inst_basereg = ins->inst_basereg;
14461 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14463 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14465 tree->opcode = OP_REGOFFSET;
14466 tree->inst_basereg = ins->inst_basereg;
14467 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14477 if (cfg->compute_gc_maps) {
14478 /* registers need liveness info even for !non refs */
14479 for (i = 0; i < cfg->num_varinfo; i++) {
14480 MonoInst *ins = cfg->varinfo [i];
14482 if (ins->opcode == OP_REGVAR)
14483 ins->flags |= MONO_INST_GC_TRACK;
14487 /* FIXME: widening and truncation */
14490 * As an optimization, when a variable allocated to the stack is first loaded into
14491 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14492 * the variable again.
14494 orig_next_vreg = cfg->next_vreg;
14495 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14496 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14500 * These arrays contain the first and last instructions accessing a given
14502 * Since we emit bblocks in the same order we process them here, and we
14503 * don't split live ranges, these will precisely describe the live range of
14504 * the variable, i.e. the instruction range where a valid value can be found
14505 * in the variables location.
14506 * The live range is computed using the liveness info computed by the liveness pass.
14507 * We can't use vmv->range, since that is an abstract live range, and we need
14508 * one which is instruction precise.
14509 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14511 /* FIXME: Only do this if debugging info is requested */
14512 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14513 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14514 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14515 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14517 /* Add spill loads/stores */
14518 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14521 if (cfg->verbose_level > 2)
14522 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14524 /* Clear vreg_to_lvreg array */
14525 for (i = 0; i < lvregs_len; i++)
14526 vreg_to_lvreg [lvregs [i]] = 0;
14530 MONO_BB_FOR_EACH_INS (bb, ins) {
14531 const char *spec = INS_INFO (ins->opcode);
14532 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14533 gboolean store, no_lvreg;
14534 int sregs [MONO_MAX_SRC_REGS];
14536 if (G_UNLIKELY (cfg->verbose_level > 2))
14537 mono_print_ins (ins);
14539 if (ins->opcode == OP_NOP)
14543 * We handle LDADDR here as well, since it can only be decomposed
14544 * when variable addresses are known.
14546 if (ins->opcode == OP_LDADDR) {
14547 MonoInst *var = (MonoInst *)ins->inst_p0;
14549 if (var->opcode == OP_VTARG_ADDR) {
14550 /* Happens on SPARC/S390 where vtypes are passed by reference */
14551 MonoInst *vtaddr = var->inst_left;
14552 if (vtaddr->opcode == OP_REGVAR) {
14553 ins->opcode = OP_MOVE;
14554 ins->sreg1 = vtaddr->dreg;
14556 else if (var->inst_left->opcode == OP_REGOFFSET) {
14557 ins->opcode = OP_LOAD_MEMBASE;
14558 ins->inst_basereg = vtaddr->inst_basereg;
14559 ins->inst_offset = vtaddr->inst_offset;
14562 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14563 /* gsharedvt arg passed by ref */
14564 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14566 ins->opcode = OP_LOAD_MEMBASE;
14567 ins->inst_basereg = var->inst_basereg;
14568 ins->inst_offset = var->inst_offset;
14569 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14570 MonoInst *load, *load2, *load3;
14571 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14572 int reg1, reg2, reg3;
14573 MonoInst *info_var = cfg->gsharedvt_info_var;
14574 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14578 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14581 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14583 g_assert (info_var);
14584 g_assert (locals_var);
14586 /* Mark the instruction used to compute the locals var as used */
14587 cfg->gsharedvt_locals_var_ins = NULL;
14589 /* Load the offset */
14590 if (info_var->opcode == OP_REGOFFSET) {
14591 reg1 = alloc_ireg (cfg);
14592 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14593 } else if (info_var->opcode == OP_REGVAR) {
14595 reg1 = info_var->dreg;
14597 g_assert_not_reached ();
14599 reg2 = alloc_ireg (cfg);
14600 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14601 /* Load the locals area address */
14602 reg3 = alloc_ireg (cfg);
14603 if (locals_var->opcode == OP_REGOFFSET) {
14604 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14605 } else if (locals_var->opcode == OP_REGVAR) {
14606 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14608 g_assert_not_reached ();
14610 /* Compute the address */
14611 ins->opcode = OP_PADD;
14615 mono_bblock_insert_before_ins (bb, ins, load3);
14616 mono_bblock_insert_before_ins (bb, load3, load2);
14618 mono_bblock_insert_before_ins (bb, load2, load);
14620 g_assert (var->opcode == OP_REGOFFSET);
14622 ins->opcode = OP_ADD_IMM;
14623 ins->sreg1 = var->inst_basereg;
14624 ins->inst_imm = var->inst_offset;
14627 *need_local_opts = TRUE;
14628 spec = INS_INFO (ins->opcode);
14631 if (ins->opcode < MONO_CEE_LAST) {
14632 mono_print_ins (ins);
14633 g_assert_not_reached ();
14637 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14641 if (MONO_IS_STORE_MEMBASE (ins)) {
14642 tmp_reg = ins->dreg;
14643 ins->dreg = ins->sreg2;
14644 ins->sreg2 = tmp_reg;
14647 spec2 [MONO_INST_DEST] = ' ';
14648 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14649 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14650 spec2 [MONO_INST_SRC3] = ' ';
14652 } else if (MONO_IS_STORE_MEMINDEX (ins))
14653 g_assert_not_reached ();
14658 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14659 printf ("\t %.3s %d", spec, ins->dreg);
14660 num_sregs = mono_inst_get_src_registers (ins, sregs);
14661 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14662 printf (" %d", sregs [srcindex]);
14669 regtype = spec [MONO_INST_DEST];
14670 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14673 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14674 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14675 MonoInst *store_ins;
14677 MonoInst *def_ins = ins;
14678 int dreg = ins->dreg; /* The original vreg */
14680 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14682 if (var->opcode == OP_REGVAR) {
14683 ins->dreg = var->dreg;
14684 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14686 * Instead of emitting a load+store, use a _membase opcode.
14688 g_assert (var->opcode == OP_REGOFFSET);
14689 if (ins->opcode == OP_MOVE) {
14693 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14694 ins->inst_basereg = var->inst_basereg;
14695 ins->inst_offset = var->inst_offset;
14698 spec = INS_INFO (ins->opcode);
14702 g_assert (var->opcode == OP_REGOFFSET);
14704 prev_dreg = ins->dreg;
14706 /* Invalidate any previous lvreg for this vreg */
14707 vreg_to_lvreg [ins->dreg] = 0;
14711 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14713 store_opcode = OP_STOREI8_MEMBASE_REG;
14716 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14718 #if SIZEOF_REGISTER != 8
14719 if (regtype == 'l') {
14720 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14721 mono_bblock_insert_after_ins (bb, ins, store_ins);
14722 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14723 mono_bblock_insert_after_ins (bb, ins, store_ins);
14724 def_ins = store_ins;
14729 g_assert (store_opcode != OP_STOREV_MEMBASE);
14731 /* Try to fuse the store into the instruction itself */
14732 /* FIXME: Add more instructions */
14733 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14734 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14735 ins->inst_imm = ins->inst_c0;
14736 ins->inst_destbasereg = var->inst_basereg;
14737 ins->inst_offset = var->inst_offset;
14738 spec = INS_INFO (ins->opcode);
14739 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14740 ins->opcode = store_opcode;
14741 ins->inst_destbasereg = var->inst_basereg;
14742 ins->inst_offset = var->inst_offset;
14746 tmp_reg = ins->dreg;
14747 ins->dreg = ins->sreg2;
14748 ins->sreg2 = tmp_reg;
14751 spec2 [MONO_INST_DEST] = ' ';
14752 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14753 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14754 spec2 [MONO_INST_SRC3] = ' ';
14756 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14757 // FIXME: The backends expect the base reg to be in inst_basereg
14758 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14760 ins->inst_basereg = var->inst_basereg;
14761 ins->inst_offset = var->inst_offset;
14762 spec = INS_INFO (ins->opcode);
14764 /* printf ("INS: "); mono_print_ins (ins); */
14765 /* Create a store instruction */
14766 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14768 /* Insert it after the instruction */
14769 mono_bblock_insert_after_ins (bb, ins, store_ins);
14771 def_ins = store_ins;
14774 * We can't assign ins->dreg to var->dreg here, since the
14775 * sregs could use it. So set a flag, and do it after
14778 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14779 dest_has_lvreg = TRUE;
14784 if (def_ins && !live_range_start [dreg]) {
14785 live_range_start [dreg] = def_ins;
14786 live_range_start_bb [dreg] = bb;
14789 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14792 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14793 tmp->inst_c1 = dreg;
14794 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14801 num_sregs = mono_inst_get_src_registers (ins, sregs);
14802 for (srcindex = 0; srcindex < 3; ++srcindex) {
14803 regtype = spec [MONO_INST_SRC1 + srcindex];
14804 sreg = sregs [srcindex];
14806 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14807 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14808 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14809 MonoInst *use_ins = ins;
14810 MonoInst *load_ins;
14811 guint32 load_opcode;
14813 if (var->opcode == OP_REGVAR) {
14814 sregs [srcindex] = var->dreg;
14815 //mono_inst_set_src_registers (ins, sregs);
14816 live_range_end [sreg] = use_ins;
14817 live_range_end_bb [sreg] = bb;
14819 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14822 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14823 /* var->dreg is a hreg */
14824 tmp->inst_c1 = sreg;
14825 mono_bblock_insert_after_ins (bb, ins, tmp);
14831 g_assert (var->opcode == OP_REGOFFSET);
14833 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14835 g_assert (load_opcode != OP_LOADV_MEMBASE);
14837 if (vreg_to_lvreg [sreg]) {
14838 g_assert (vreg_to_lvreg [sreg] != -1);
14840 /* The variable is already loaded to an lvreg */
14841 if (G_UNLIKELY (cfg->verbose_level > 2))
14842 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14843 sregs [srcindex] = vreg_to_lvreg [sreg];
14844 //mono_inst_set_src_registers (ins, sregs);
14848 /* Try to fuse the load into the instruction */
14849 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14850 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14851 sregs [0] = var->inst_basereg;
14852 //mono_inst_set_src_registers (ins, sregs);
14853 ins->inst_offset = var->inst_offset;
14854 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14855 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14856 sregs [1] = var->inst_basereg;
14857 //mono_inst_set_src_registers (ins, sregs);
14858 ins->inst_offset = var->inst_offset;
14860 if (MONO_IS_REAL_MOVE (ins)) {
14861 ins->opcode = OP_NOP;
14864 //printf ("%d ", srcindex); mono_print_ins (ins);
14866 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14868 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14869 if (var->dreg == prev_dreg) {
14871 * sreg refers to the value loaded by the load
14872 * emitted below, but we need to use ins->dreg
14873 * since it refers to the store emitted earlier.
14877 g_assert (sreg != -1);
14878 vreg_to_lvreg [var->dreg] = sreg;
14879 g_assert (lvregs_len < 1024);
14880 lvregs [lvregs_len ++] = var->dreg;
14884 sregs [srcindex] = sreg;
14885 //mono_inst_set_src_registers (ins, sregs);
14887 #if SIZEOF_REGISTER != 8
14888 if (regtype == 'l') {
14889 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14890 mono_bblock_insert_before_ins (bb, ins, load_ins);
14891 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14892 mono_bblock_insert_before_ins (bb, ins, load_ins);
14893 use_ins = load_ins;
14898 #if SIZEOF_REGISTER == 4
14899 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14901 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14902 mono_bblock_insert_before_ins (bb, ins, load_ins);
14903 use_ins = load_ins;
14907 if (var->dreg < orig_next_vreg) {
14908 live_range_end [var->dreg] = use_ins;
14909 live_range_end_bb [var->dreg] = bb;
14912 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14915 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14916 tmp->inst_c1 = var->dreg;
14917 mono_bblock_insert_after_ins (bb, ins, tmp);
14921 mono_inst_set_src_registers (ins, sregs);
14923 if (dest_has_lvreg) {
14924 g_assert (ins->dreg != -1);
14925 vreg_to_lvreg [prev_dreg] = ins->dreg;
14926 g_assert (lvregs_len < 1024);
14927 lvregs [lvregs_len ++] = prev_dreg;
14928 dest_has_lvreg = FALSE;
14932 tmp_reg = ins->dreg;
14933 ins->dreg = ins->sreg2;
14934 ins->sreg2 = tmp_reg;
14937 if (MONO_IS_CALL (ins)) {
14938 /* Clear vreg_to_lvreg array */
14939 for (i = 0; i < lvregs_len; i++)
14940 vreg_to_lvreg [lvregs [i]] = 0;
14942 } else if (ins->opcode == OP_NOP) {
14944 MONO_INST_NULLIFY_SREGS (ins);
14947 if (cfg->verbose_level > 2)
14948 mono_print_ins_index (1, ins);
14951 /* Extend the live range based on the liveness info */
14952 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14953 for (i = 0; i < cfg->num_varinfo; i ++) {
14954 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14956 if (vreg_is_volatile (cfg, vi->vreg))
14957 /* The liveness info is incomplete */
14960 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14961 /* Live from at least the first ins of this bb */
14962 live_range_start [vi->vreg] = bb->code;
14963 live_range_start_bb [vi->vreg] = bb;
14966 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14967 /* Live at least until the last ins of this bb */
14968 live_range_end [vi->vreg] = bb->last_ins;
14969 live_range_end_bb [vi->vreg] = bb;
14976 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14977 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14979 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14980 for (i = 0; i < cfg->num_varinfo; ++i) {
14981 int vreg = MONO_VARINFO (cfg, i)->vreg;
14984 if (live_range_start [vreg]) {
14985 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14987 ins->inst_c1 = vreg;
14988 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14990 if (live_range_end [vreg]) {
14991 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14993 ins->inst_c1 = vreg;
14994 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14995 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14997 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15002 if (cfg->gsharedvt_locals_var_ins) {
15003 /* Nullify if unused */
15004 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15005 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15008 g_free (live_range_start);
15009 g_free (live_range_end);
15010 g_free (live_range_start_bb);
15011 g_free (live_range_end_bb);
15016 * - use 'iadd' instead of 'int_add'
15017 * - handling ovf opcodes: decompose in method_to_ir.
15018 * - unify iregs/fregs
15019 * -> partly done, the missing parts are:
15020 * - a more complete unification would involve unifying the hregs as well, so
15021 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15022 * would no longer map to the machine hregs, so the code generators would need to
15023 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15024 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15025 * fp/non-fp branches speeds it up by about 15%.
15026 * - use sext/zext opcodes instead of shifts
15028 * - get rid of TEMPLOADs if possible and use vregs instead
15029 * - clean up usage of OP_P/OP_ opcodes
15030 * - cleanup usage of DUMMY_USE
15031 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15033 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15034 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15035 * - make sure handle_stack_args () is called before the branch is emitted
15036 * - when the new IR is done, get rid of all unused stuff
15037 * - COMPARE/BEQ as separate instructions or unify them ?
15038 * - keeping them separate allows specialized compare instructions like
15039 * compare_imm, compare_membase
15040 * - most back ends unify fp compare+branch, fp compare+ceq
15041 * - integrate mono_save_args into inline_method
15042 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15043 * - handle long shift opts on 32 bit platforms somehow: they require
15044 * 3 sregs (2 for arg1 and 1 for arg2)
15045 * - make byref a 'normal' type.
15046 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15047 * variable if needed.
15048 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15049 * like inline_method.
15050 * - remove inlining restrictions
15051 * - fix LNEG and enable cfold of INEG
15052 * - generalize x86 optimizations like ldelema as a peephole optimization
15053 * - add store_mem_imm for amd64
15054 * - optimize the loading of the interruption flag in the managed->native wrappers
15055 * - avoid special handling of OP_NOP in passes
15056 * - move code inserting instructions into one function/macro.
15057 * - try a coalescing phase after liveness analysis
15058 * - add float -> vreg conversion + local optimizations on !x86
15059 * - figure out how to handle decomposed branches during optimizations, ie.
15060 * compare+branch, op_jump_table+op_br etc.
15061 * - promote RuntimeXHandles to vregs
15062 * - vtype cleanups:
15063 * - add a NEW_VARLOADA_VREG macro
15064 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15065 * accessing vtype fields.
15066 * - get rid of I8CONST on 64 bit platforms
15067 * - dealing with the increase in code size due to branches created during opcode
15069 * - use extended basic blocks
15070 * - all parts of the JIT
15071 * - handle_global_vregs () && local regalloc
15072 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15073 * - sources of increase in code size:
15076 * - isinst and castclass
15077 * - lvregs not allocated to global registers even if used multiple times
15078 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15080 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15081 * - add all micro optimizations from the old JIT
15082 * - put tree optimizations into the deadce pass
15083 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15084 * specific function.
15085 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15086 * fcompare + branchCC.
15087 * - create a helper function for allocating a stack slot, taking into account
15088 * MONO_CFG_HAS_SPILLUP.
15090 * - merge the ia64 switch changes.
15091 * - optimize mono_regstate2_alloc_int/float.
15092 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15093 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15094 * parts of the tree could be separated by other instructions, killing the tree
15095 * arguments, or stores killing loads etc. Also, should we fold loads into other
15096 * instructions if the result of the load is used multiple times ?
15097 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15098 * - LAST MERGE: 108395.
15099 * - when returning vtypes in registers, generate IR and append it to the end of the
15100 * last bb instead of doing it in the epilog.
15101 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15109 - When to decompose opcodes:
15110 - earlier: this makes some optimizations hard to implement, since the low level IR
15111 no longer contains the necessary information. But it is easier to do.
15112 - later: harder to implement, enables more optimizations.
15113 - Branches inside bblocks:
15114 - created when decomposing complex opcodes.
15115 - branches to another bblock: harmless, but not tracked by the branch
15116 optimizations, so need to branch to a label at the start of the bblock.
15117 - branches to inside the same bblock: very problematic, trips up the local
15118 reg allocator. Can be fixed by splitting the current bblock, but that is a
15119 complex operation, since some local vregs can become global vregs etc.
15120 - Local/global vregs:
15121 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15122 local register allocator.
15123 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15124 structure, created by mono_create_var (). Assigned to hregs or the stack by
15125 the global register allocator.
15126 - When to do optimizations like alu->alu_imm:
15127 - earlier -> saves work later on since the IR will be smaller/simpler
15128 - later -> can work on more instructions
15129 - Handling of valuetypes:
15130 - When a vtype is pushed on the stack, a new temporary is created, an
15131 instruction computing its address (LDADDR) is emitted and pushed on
15132 the stack. Need to optimize cases when the vtype is used immediately as in
15133 argument passing, stloc etc.
15134 - Instead of the to_end stuff in the old JIT, simply call the function handling
15135 the values on the stack before emitting the last instruction of the bb.
15138 #endif /* DISABLE_JIT */