2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
28 #ifdef HAVE_SYS_TIME_H
36 #include <mono/utils/memcheck.h>
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/*
 * Inlining cost tunables and error-exit helper macros used throughout the
 * CIL->IR conversion.  Most of these record a failure on 'cfg' and then
 * jump to the local 'exception_exit' label, so they may only be used inside
 * functions that define that label.
 */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/* Give up inlining the current callee (only when we are compiling an
 * inlined, non-wrapper method) and bail out. */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
/* Bail out if an earlier helper already recorded an exception on the cfg. */ \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
/* Record a FieldAccessException for METHOD/FIELD and bail out. */ \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
/* Mark generic sharing as failed for OPCODE (tagged with call site) and bail out. */ \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
/* Same, but only takes effect when compiling a gsharedvt method. */ \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
/* Record an out-of-memory condition in cfg->error and bail out. */ \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
/* Disable AOT compilation for this method (logged at verbose level >= 2). */ \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
/* Type-load failure: optionally trap into the debugger, then bail out. */ \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
/* As LOAD_ERROR, but also records the offending KLASS on the cfg. */ \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
/* Convert a failed cfg->error into a MONO_ERROR exception and bail out
 * through the mono_error_exit label. */ \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whether 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
148 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
149 guchar *ip, guint real_offset, gboolean inline_always);
151 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
153 /* helper methods signatures */
/* Lazily-created icall signatures, see mono_create_helper_signatures () below. */
154 static MonoMethodSignature *helper_sig_domain_get;
155 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
156 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
159 /* type loading helpers */
/* Generated accessors that look up (and cache) well-known corlib classes. */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
/*
 * Instruction metadata: tables generated by including mini-ops.h with
 * different MINI_OP/MINI_OP3 expansions — first the dreg/sreg kind
 * characters, then per-opcode source-register counts.
 */
164 * Instruction metadata
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: count how many source registers each opcode consumes. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Initialize liveness info for a variable; 0xffff marks "not yet used". */
204 #define MONO_INIT_VARINFO(vi,id) do { \
205 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the vreg allocators: each allocates a fresh
 * virtual register of the given kind (int/long/float/pointer) on 'cfg'.
 */
211 mono_alloc_ireg (MonoCompile *cfg)
213 return alloc_ireg (cfg);
217 mono_alloc_lreg (MonoCompile *cfg)
219 return alloc_lreg (cfg);
223 mono_alloc_freg (MonoCompile *cfg)
225 return alloc_freg (cfg);
229 mono_alloc_preg (MonoCompile *cfg)
231 return alloc_preg (cfg);
/* Allocate a destination vreg suitable for the given eval-stack type. */
235 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
237 return alloc_dreg (cfg, stack_type);
241 * mono_alloc_ireg_ref:
243 * Allocate an IREG, and mark it as holding a GC ref.
246 mono_alloc_ireg_ref (MonoCompile *cfg)
248 return alloc_ireg_ref (cfg);
252 * mono_alloc_ireg_mp:
254 * Allocate an IREG, and mark it as holding a managed pointer.
257 mono_alloc_ireg_mp (MonoCompile *cfg)
259 return alloc_ireg_mp (cfg);
263 * mono_alloc_ireg_copy:
265 * Allocate an IREG with the same GC type as VREG.
268 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain) of VREG. */
270 if (vreg_is_ref (cfg, vreg))
271 return alloc_ireg_ref (cfg);
272 else if (vreg_is_mp (cfg, vreg))
273 return alloc_ireg_mp (cfg);
275 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Return the move opcode (OP_MOVE / OP_FMOVE / OP_RMOVE / ...) to use
 *   when copying a value of TYPE between vregs.  Enums and generic
 *   instances are reduced to their underlying/container type first; type
 *   variables are only legal here under generic sharing (cfg->gshared).
 */
279 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
284 type = mini_get_underlying_type (type);
286 switch (type->type) {
299 case MONO_TYPE_FNPTR:
301 case MONO_TYPE_CLASS:
302 case MONO_TYPE_STRING:
303 case MONO_TYPE_OBJECT:
304 case MONO_TYPE_SZARRAY:
305 case MONO_TYPE_ARRAY:
309 #if SIZEOF_REGISTER == 8
/* r4 values live in float regs only when the backend has no r4 support. */
315 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
318 case MONO_TYPE_VALUETYPE:
319 if (type->data.klass->enumtype) {
/* Enums behave like their underlying integer type. */
320 type = mono_class_enum_basetype (type->data.klass);
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
326 case MONO_TYPE_TYPEDBYREF:
328 case MONO_TYPE_GENERICINST:
329 type = &type->data.generic_class->container_class->byval_arg;
333 g_assert (cfg->gshared);
334 if (mini_type_var_is_vt (type))
337 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
339 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: dump a basic block's incoming and outgoing CFG edges and
 *   its instruction list to stdout, prefixed by MSG.
 */
345 mono_print_bb (MonoBasicBlock *bb, const char *msg)
350 printf ("\n%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
354 for (i = 0; i < bb->out_count; ++i)
355 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
357 for (tree = bb->code; tree; tree = tree->next)
358 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   Build the icall signatures used by the helpers declared near the top of
 *   this file ("ptr" strings describe return type followed by parameters).
 */
362 mono_create_helper_signatures (void)
364 helper_sig_domain_get = mono_create_icall_signature ("ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 *   Debugging hook invoked whenever IL fails to verify.  Checks the
 *   'break_on_unverified' debug option; when set it presumably traps into
 *   the debugger -- confirm against the full source, the body is not
 *   visible here.  MONO_NEVER_INLINE keeps it as a breakpoint target.
 */
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
/*
 * field_access_failure:
 *   Record a System.FieldAccessException on CFG for FIELD being
 *   inaccessible from METHOD, using full names in the message.  The
 *   temporary name strings are freed before returning.
 */
376 static MONO_NEVER_INLINE void
377 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *field_fname = mono_field_full_name (field);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
382 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
383 g_free (method_fname);
384 g_free (field_fname);
/*
 * inline_failure:
 *   Mark the current compilation as a failed inline attempt (optionally
 *   logging MSG at verbose level >= 2).  Callers reach this through the
 *   INLINE_FAILURE macro, which then bails out via exception_exit.
 */
387 static MONO_NEVER_INLINE void
388 inline_failure (MonoCompile *cfg, const char *msg)
390 if (cfg->verbose_level >= 2)
391 printf ("inline failed: %s\n", msg);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
395 static MONO_NEVER_INLINE void
396 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
398 if (cfg->verbose_level > 2) \
399 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *   Record a gsharedvt compilation failure for OPCODE: store a formatted
 *   message (caller-owned by cfg) in cfg->exception_message, optionally
 *   log it, and mark the cfg so compilation falls back to a concrete
 *   instantiation.  Unlike gshared_failure (), the message includes FILE.
 */
403 static MONO_NEVER_INLINE void
404 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
407 if (cfg->verbose_level >= 2)
408 printf ("%s\n", cfg->exception_message);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
413 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
414 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the current IL as unverifiable: under gsharedvt, fall back to
 * per-instantiation compilation instead of failing outright. */
416 #define UNVERIFIED do { \
417 if (cfg->gsharedvt) { \
418 if (cfg->verbose_level > 2) \
419 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
421 goto exception_exit; \
423 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IP, \
 * rejecting IPs that fall outside the method body. */ \
427 #define GET_BBLOCK(cfg,tblock,ip) do { \
428 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
430 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
431 NEW_BBLOCK (cfg, (tblock)); \
432 (tblock)->cil_code = (ip); \
433 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm; \
 * the result register is tagged as a managed pointer. */ \
437 #if defined(TARGET_X86) || defined(TARGET_AMD64)
438 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
439 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
440 (dest)->dreg = alloc_ireg_mp ((cfg)); \
441 (dest)->sreg1 = (sr1); \
442 (dest)->sreg2 = (sr2); \
443 (dest)->inst_imm = (imm); \
444 (dest)->backend.shift_amount = (shift); \
445 MONO_ADD_INS ((cfg)->cbb, (dest)); \
449 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *   For binary instruction INS with operands *ARG1_REF / *ARG2_REF, insert
 *   widening conversions so both operands share a stack type: a mixed
 *   r4/r8 pair promotes the r4 side to r8, and (64-bit only) an i4 operand
 *   mixed with native-int is sign-extended.  The arg refs and INS's sregs
 *   are updated to point at the converted values.
 */
451 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
453 MonoInst *arg1 = *arg1_ref;
454 MonoInst *arg2 = *arg2_ref;
457 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
458 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
461 /* Mixing r4/r8 is allowed by the spec */
462 if (arg1->type == STACK_R4) {
463 int dreg = alloc_freg (cfg);
465 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
466 conv->type = STACK_R8;
470 if (arg2->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
474 conv->type = STACK_R8;
480 #if SIZEOF_REGISTER == 8
481 /* FIXME: Need to add many more cases */
482 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
485 int dr = alloc_preg (cfg);
486 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
487 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack values, apply binary opcode OP, push the typed result. */ \
492 #define ADD_BINOP(op) do { \
493 MONO_INST_NEW (cfg, ins, (op)); \
495 ins->sreg1 = sp [0]->dreg; \
496 ins->sreg2 = sp [1]->dreg; \
497 type_from_op (cfg, ins, sp [0], sp [1]); \
499 /* Have to insert a widening op */ \
500 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
501 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
502 MONO_ADD_INS ((cfg)->cbb, (ins)); \
503 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one eval-stack value, apply unary opcode OP, push the typed result. */ \
506 #define ADD_UNOP(op) do { \
507 MONO_INST_NEW (cfg, ins, (op)); \
509 ins->sreg1 = sp [0]->dreg; \
510 type_from_op (cfg, ins, sp [0], NULL); \
512 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
513 MONO_ADD_INS ((cfg)->cbb, (ins)); \
514 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit compare + conditional branch: the true edge goes to 'target', the \
 * false edge to NEXT_BLOCK (or a fresh block at the fall-through ip). */ \
517 #define ADD_BINCOND(next_block) do { \
520 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
521 cmp->sreg1 = sp [0]->dreg; \
522 cmp->sreg2 = sp [1]->dreg; \
523 type_from_op (cfg, cmp, sp [0], sp [1]); \
525 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
526 type_from_op (cfg, ins, sp [0], sp [1]); \
527 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
528 GET_BBLOCK (cfg, tblock, target); \
529 link_bblock (cfg, cfg->cbb, tblock); \
530 ins->inst_true_bb = tblock; \
531 if ((next_block)) { \
532 link_bblock (cfg, cfg->cbb, (next_block)); \
533 ins->inst_false_bb = (next_block); \
534 start_new_bblock = 1; \
536 GET_BBLOCK (cfg, tblock, ip); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_false_bb = tblock; \
539 start_new_bblock = 2; \
541 if (sp != stack_start) { \
542 handle_stack_args (cfg, stack_start, sp - stack_start); \
543 CHECK_UNVERIFIABLE (cfg); \
545 MONO_ADD_INS (cfg->cbb, cmp); \
546 MONO_ADD_INS (cfg->cbb, ins); \
550 * link_bblock: Links two basic blocks
552 * links two basic blocks in the control flow graph, the 'from'
553 * argument is the starting block and the 'to' argument is the block
554 * the control flow ends to after 'from'.
/* Idempotent: existing out/in edges are checked before the edge arrays are
 * regrown from the cfg mempool. */
557 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
559 MonoBasicBlock **newa;
563 if (from->cil_code) {
565 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
567 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
570 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
572 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
577 for (i = 0; i < from->out_count; ++i) {
578 if (to == from->out_bb [i]) {
584 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
585 for (i = 0; i < from->out_count; ++i) {
586 newa [i] = from->out_bb [i];
/* Skip if the in-edge already exists. */
594 for (i = 0; i < to->in_count; ++i) {
595 if (from == to->in_bb [i]) {
601 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
602 for (i = 0; i < to->in_count; ++i) {
603 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
612 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
614 link_bblock (cfg, from, to);
618 * mono_find_block_region:
620 * We mark each basic block with a region ID. We use that to avoid BB
621 * optimizations when blocks are in different regions.
624 * A region token that encodes where this region is, and information
625 * about the clause owner for this block.
627 * The region encodes the try/catch/filter clause that owns this block
628 * as well as the type. -1 is a special value that represents a block
629 * that is in none of try/catch/filter.
632 mono_find_block_region (MonoCompile *cfg, int offset)
634 MonoMethodHeader *header = cfg->header;
635 MonoExceptionClause *clause;
/* First pass: handler regions (filter, finally, fault, catch)... */
638 for (i = 0; i < header->num_clauses; ++i) {
639 clause = &header->clauses [i];
/* A filter block lies between filter_offset and handler_offset. */
640 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
641 (offset < (clause->handler_offset)))
642 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
644 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
645 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
646 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
647 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
648 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
650 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* ...then try regions, so handlers take priority over enclosing trys. */
653 for (i = 0; i < header->num_clauses; ++i) {
654 clause = &header->clauses [i];
656 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
657 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the exception clauses of kind TYPE that a branch from IP to
 *   TARGET leaves (IP inside the clause, TARGET outside) — i.e. the
 *   handlers that must run on the way out.  Returns a GList of clause
 *   pointers (caller frees the list).
 */
664 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
666 MonoMethodHeader *header = cfg->header;
667 MonoExceptionClause *clause;
671 for (i = 0; i < header->num_clauses; ++i) {
672 clause = &header->clauses [i];
673 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
674 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
675 if (clause->flags == type)
676 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *   Return the stack-pointer save variable for REGION, creating and
 *   caching it in cfg->spvars on first use.  The var is marked VOLATILE so
 *   the register allocator leaves it on the stack.
 */
683 mono_create_spvar_for_region (MonoCompile *cfg, int region)
687 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
691 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
692 /* prevent it from being register allocated */
693 var->flags |= MONO_INST_VOLATILE;
695 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at OFFSET (NULL if
 * none has been created yet). */
699 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
701 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   As above, but creates and caches the variable on first use; marked
 *   VOLATILE so it is stack-allocated, not register-allocated.
 */
705 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
709 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
713 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
723 * Returns the type used in the eval stack when @type is loaded.
724 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass for a value of TYPE being
 * pushed; recurses for enums and generic instances. */
727 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
731 type = mini_get_underlying_type (type);
732 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the stack. */
734 inst->type = STACK_MP;
739 switch (type->type) {
741 inst->type = STACK_INV;
749 inst->type = STACK_I4;
754 case MONO_TYPE_FNPTR:
755 inst->type = STACK_PTR;
757 case MONO_TYPE_CLASS:
758 case MONO_TYPE_STRING:
759 case MONO_TYPE_OBJECT:
760 case MONO_TYPE_SZARRAY:
761 case MONO_TYPE_ARRAY:
762 inst->type = STACK_OBJ;
766 inst->type = STACK_I8;
/* r4 may be STACK_R4 or STACK_R8 depending on backend r4 support. */
769 inst->type = cfg->r4_stack_type;
772 inst->type = STACK_R8;
774 case MONO_TYPE_VALUETYPE:
775 if (type->data.klass->enumtype) {
776 type = mono_class_enum_basetype (type->data.klass);
780 inst->type = STACK_VTYPE;
783 case MONO_TYPE_TYPEDBYREF:
784 inst->klass = mono_defaults.typed_reference_class;
785 inst->type = STACK_VTYPE;
787 case MONO_TYPE_GENERICINST:
788 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only valid here under generic sharing. */
792 g_assert (cfg->gshared);
793 if (mini_is_gsharedvt_type (type)) {
794 g_assert (cfg->gsharedvt);
795 inst->type = STACK_VTYPE;
797 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
801 g_error ("unknown type 0x%02x in eval stack type", type->type);
806 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop, indexed by [type(op1)][type(op2)].
 * NOTE(review): rows carrying a 9th entry cover the trailing STACK_R4
 * column; shorter rows rely on static zero-fill — confirm STACK_MAX == 9
 * against mini.h. */
809 bin_num_table [STACK_MAX] [STACK_MAX] = {
810 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
815 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, indexed by operand stack type. */
823 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
826 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
828 bin_int_table [STACK_MAX] [STACK_MAX] = {
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of two stack types: 0 = invalid, non-zero encodes which
 * comparison kinds are allowed (see the uses in type_from_op ()). */
840 bin_comp_table [STACK_MAX] [STACK_MAX] = {
841 /* Inv i L p F & O vt r4 */
843 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
844 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
845 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
846 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
847 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
848 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
849 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
850 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
853 /* reduce the size of this table */
/* Result type of shift operations, indexed by [value][shift-amount]. */
855 shift_table [STACK_MAX] [STACK_MAX] = {
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
867 * Tables to map from the non-specific opcode to the matching
868 * type-specific opcode.
870 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CEE_* opcode. */
872 binops_op_map [STACK_MAX] = {
873 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
876 /* handles from CEE_NEG to CEE_CONV_U8 */
878 unops_op_map [STACK_MAX] = {
879 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
882 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
884 ovfops_op_map [STACK_MAX] = {
885 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
888 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
890 ovf2ops_op_map [STACK_MAX] = {
891 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
894 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
896 ovf3ops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
900 /* handles from CEE_BEQ to CEE_BLT_UN */
902 beqops_op_map [STACK_MAX] = {
903 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
906 /* handles from CEE_CEQ to CEE_CLT_UN */
908 ceqops_op_map [STACK_MAX] = {
909 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
913 * Sets ins->type (the type on the eval stack) according to the
914 * type of the opcode and the arguments to it.
915 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
917 * FIXME: this function sets ins->type unconditionally in some cases, but
918 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes the generic opcode (via the *_op_map delta tables above)
 * to the type-specific IR opcode. */
921 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
923 switch (ins->opcode) {
930 /* FIXME: check unverifiable args for STACK_MP */
931 ins->type = bin_num_table [src1->type] [src2->type];
932 ins->opcode += binops_op_map [ins->type];
939 ins->type = bin_int_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
945 ins->type = shift_table [src1->type] [src2->type];
946 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I compare based on the first operand's width. */
951 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
952 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
953 ins->opcode = OP_LCOMPARE;
954 else if (src1->type == STACK_R4)
955 ins->opcode = OP_RCOMPARE;
956 else if (src1->type == STACK_R8)
957 ins->opcode = OP_FCOMPARE;
959 ins->opcode = OP_ICOMPARE;
961 case OP_ICOMPARE_IMM:
962 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
963 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
964 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare opcodes. */
976 ins->opcode += beqops_op_map [src1->type];
979 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
980 ins->opcode += ceqops_op_map [src1->type];
986 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
987 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
991 ins->type = neg_table [src1->type];
992 ins->opcode += unops_op_map [ins->type];
995 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
996 ins->type = src1->type;
998 ins->type = STACK_INV;
999 ins->opcode += unops_op_map [ins->type];
/* Narrowing / numeric conversions. */
1005 ins->type = STACK_I4;
1006 ins->opcode += unops_op_map [src1->type];
1009 ins->type = STACK_R8;
1010 switch (src1->type) {
1013 ins->opcode = OP_ICONV_TO_R_UN;
1016 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit targets. */
1020 case CEE_CONV_OVF_I1:
1021 case CEE_CONV_OVF_U1:
1022 case CEE_CONV_OVF_I2:
1023 case CEE_CONV_OVF_U2:
1024 case CEE_CONV_OVF_I4:
1025 case CEE_CONV_OVF_U4:
1026 ins->type = STACK_I4;
1027 ins->opcode += ovf3ops_op_map [src1->type];
1029 case CEE_CONV_OVF_I_UN:
1030 case CEE_CONV_OVF_U_UN:
1031 ins->type = STACK_PTR;
1032 ins->opcode += ovf2ops_op_map [src1->type];
1034 case CEE_CONV_OVF_I1_UN:
1035 case CEE_CONV_OVF_I2_UN:
1036 case CEE_CONV_OVF_I4_UN:
1037 case CEE_CONV_OVF_U1_UN:
1038 case CEE_CONV_OVF_U2_UN:
1039 case CEE_CONV_OVF_U4_UN:
1040 ins->type = STACK_I4;
1041 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-unsigned conversion, pointer-width dependent. */
1044 ins->type = STACK_PTR;
1045 switch (src1->type) {
1047 ins->opcode = OP_ICONV_TO_U;
1051 #if SIZEOF_VOID_P == 8
1052 ins->opcode = OP_LCONV_TO_U;
1054 ins->opcode = OP_MOVE;
1058 ins->opcode = OP_LCONV_TO_U;
1061 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
1067 ins->type = STACK_I8;
1068 ins->opcode += unops_op_map [src1->type];
1070 case CEE_CONV_OVF_I8:
1071 case CEE_CONV_OVF_U8:
1072 ins->type = STACK_I8;
1073 ins->opcode += ovf3ops_op_map [src1->type];
1075 case CEE_CONV_OVF_U8_UN:
1076 case CEE_CONV_OVF_I8_UN:
1077 ins->type = STACK_I8;
1078 ins->opcode += ovf2ops_op_map [src1->type];
/* Floating-point conversions. */
1081 ins->type = cfg->r4_stack_type;
1082 ins->opcode += unops_op_map [src1->type];
1085 ins->type = STACK_R8;
1086 ins->opcode += unops_op_map [src1->type];
1089 ins->type = STACK_R8;
1093 ins->type = STACK_I4;
1094 ins->opcode += ovfops_op_map [src1->type];
1097 case CEE_CONV_OVF_I:
1098 case CEE_CONV_OVF_U:
1099 ins->type = STACK_PTR;
1100 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
1103 case CEE_ADD_OVF_UN:
1105 case CEE_MUL_OVF_UN:
1107 case CEE_SUB_OVF_UN:
1108 ins->type = bin_num_table [src1->type] [src2->type];
1109 ins->opcode += ovfops_op_map [src1->type];
1110 if (ins->type == STACK_R8)
1111 ins->type = STACK_INV;
/* Loads: result type follows the memory operand width. */
1113 case OP_LOAD_MEMBASE:
1114 ins->type = STACK_PTR;
1116 case OP_LOADI1_MEMBASE:
1117 case OP_LOADU1_MEMBASE:
1118 case OP_LOADI2_MEMBASE:
1119 case OP_LOADU2_MEMBASE:
1120 case OP_LOADI4_MEMBASE:
1121 case OP_LOADU4_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI8_MEMBASE:
1125 ins->type = STACK_I8;
1127 case OP_LOADR4_MEMBASE:
1128 ins->type = cfg->r4_stack_type;
1130 case OP_LOADR8_MEMBASE:
1131 ins->type = STACK_R8;
1134 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1138 if (ins->type == STACK_MP)
1139 ins->klass = mono_defaults.object_class;
/* Table fragment mapping (presumably) MONO_TYPE_* to STACK_* — the
 * surrounding declaration is not visible here; confirm in the full source. */
1144 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1150 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Validate that the eval-stack types of ARGS are compatible with SIG
 *   (byref-ness, reference kinds, float widths).  Returns non-zero on
 *   success, 0 on mismatch (per the visible early-return structure).
 */
1155 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1160 switch (args->type) {
1170 for (i = 0; i < sig->param_count; ++i) {
1171 switch (args [i].type) {
1175 if (!sig->params [i]->byref)
1179 if (sig->params [i]->byref)
1181 switch (sig->params [i]->type) {
1182 case MONO_TYPE_CLASS:
1183 case MONO_TYPE_STRING:
1184 case MONO_TYPE_OBJECT:
1185 case MONO_TYPE_SZARRAY:
1186 case MONO_TYPE_ARRAY:
1193 if (sig->params [i]->byref)
1195 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1204 /*if (!param_table [args [i].type] [sig->params [i]->type])
1212 * When we need a pointer to the current domain many times in a method, we
1213 * call mono_domain_get() once and we store the result in a local variable.
1214 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable on first request. */
1216 inline static MonoInst *
1217 mono_get_domainvar (MonoCompile *cfg)
1219 if (!cfg->domainvar)
1220 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1221 return cfg->domainvar;
1225 * The got_var contains the address of the Global Offset Table when AOT
/* Returns NULL unless compiling AOT on a backend that needs a GOT var;
 * lazily creates the variable on first request. */
1229 mono_get_got_var (MonoCompile *cfg)
1231 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1233 if (!cfg->got_var) {
1234 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1236 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Return the variable holding the runtime generic context / vtable for a
 *   shared method (asserts cfg->gshared), creating it on first use.
 *   Marked VOLATILE so it stays stack-allocated.
 */
1240 mono_get_vtable_var (MonoCompile *cfg)
1242 g_assert (cfg->gshared);
1244 if (!cfg->rgctx_var) {
1245 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 /* force the var to be stack allocated */
1247 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1250 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a corlib MonoType*;
 * managed pointers and vtypes use the instruction's klass. */
1254 type_from_stack_type (MonoInst *ins) {
1255 switch (ins->type) {
1256 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1257 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1258 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1259 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1260 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1262 return &ins->klass->this_arg;
1263 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1264 case STACK_VTYPE: return &ins->klass->byval_arg;
1266 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Return the STACK_* value a value of type T occupies on the eval stack
 *   (underlying type is resolved first; generic insts split on valuetype).
 */
1271 static G_GNUC_UNUSED int
1272 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1274 t = mono_type_get_underlying_type (t);
1286 case MONO_TYPE_FNPTR:
1288 case MONO_TYPE_CLASS:
1289 case MONO_TYPE_STRING:
1290 case MONO_TYPE_OBJECT:
1291 case MONO_TYPE_SZARRAY:
1292 case MONO_TYPE_ARRAY:
1298 return cfg->r4_stack_type;
1301 case MONO_TYPE_VALUETYPE:
1302 case MONO_TYPE_TYPEDBYREF:
1304 case MONO_TYPE_GENERICINST:
1305 if (mono_type_generic_inst_is_valuetype (t))
1311 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element MonoClass corresponding to a CEE_LDELEM_* / CEE_STELEM_*
 * opcode.  NOTE(review): most case labels are elided in this chunk; each
 * visible return pairs with the LDELEM/STELEM opcode of that element type.
 */
1318 array_access_to_klass (int opcode)
1322 return mono_defaults.byte_class;
1324 return mono_defaults.uint16_class;
1327 return mono_defaults.int_class;
1330 return mono_defaults.sbyte_class;
1333 return mono_defaults.int16_class;
1336 return mono_defaults.int32_class;
1338 return mono_defaults.uint32_class;
1341 return mono_defaults.int64_class;
1344 return mono_defaults.single_class;
1347 return mono_defaults.double_class;
1348 case CEE_LDELEM_REF:
1349 case CEE_STELEM_REF:
1350 return mono_defaults.object_class;
/* Any other opcode is a caller bug */
1352 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 *   Return (or create) the local variable used to spill stack slot SLOT
 * holding INS at a basic-block boundary.  Integer-like slots are cached in
 * cfg->intvars, keyed by (stack type, slot), so join points reuse the same
 * variable; other types always get a fresh local.
 */
1358  * We try to share variables when possible
1361 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1366 /* inlining can result in deeper stacks */
1367 if (slot >= cfg->header->max_stack)
1368 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per (type, slot) pair */
1370 pos = ins->type - 1 + slot * STACK_MAX;
1372 switch (ins->type) {
/* vnum == 0 means "no cached variable yet" (var 0 is never an interface var) */
1379 if ((vnum = cfg->intvars [pos]))
1380 return cfg->varinfo [vnum];
1381 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1382 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to an uncached fresh local */
1385 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1391 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1394 * Don't use this if a generic_context is set, since that means AOT can't
1395 * look up the method using just the image+token.
1396 * table == 0 means this is a reference made from a wrapper.
1398 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1399 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1400 jump_info_token->image = image;
1401 jump_info_token->token = token;
1402 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   Spill the COUNT items in SP to locals at a basic-block boundary so the
 * successor blocks can reload them.  NOTE(review): several structural lines
 * (braces, continue statements) are elided in this chunk; the control flow
 * comments below describe the visible logic only.
 */
1407  * This function is called to handle items that are left on the evaluation stack
1408  * at basic block boundaries. What happens is that we save the values to local variables
1409  * and we reload them later when first entering the target basic block (with the
1410  * handle_loaded_temps () function).
1411  * A single joint point will use the same variables (stored in the array bb->out_stack or
1412  * bb->in_stack, if the basic block is before or after the joint point).
1414  * This function needs to be called _before_ emitting the last instruction of
1415  * the bb (i.e. before emitting a branch).
1416  * If the stack merge fails at a join point, cfg->unverifiable is set.
1419 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1422 MonoBasicBlock *bb = cfg->cbb;
1423 MonoBasicBlock *outb;
1424 MonoInst *inst, **locals;
1429 if (cfg->verbose_level > 3)
1430 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables hold the outgoing stack */
1431 if (!bb->out_scount) {
1432 bb->out_scount = count;
1433 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack a successor already has, so both sides agree */
1435 for (i = 0; i < bb->out_count; ++i) {
1436 outb = bb->out_bb [i];
1437 /* exception handlers are linked, but they should not be considered for stack args */
1438 if (outb->flags & BB_EXCEPTION_HANDLER)
1440 //printf (" %d", outb->block_num);
1441 if (outb->in_stack) {
1443 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh spill variables */
1449 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1450 for (i = 0; i < count; ++i) {
1452 * try to reuse temps already allocated for this purpose, if they occupy the same
1453 * stack slot and if they are of the same type.
1454 * This won't cause conflicts since if 'local' is used to
1455 * store one of the values in the in_stack of a bblock, then
1456 * the same variable will be used for the same outgoing stack
1458 * This doesn't work when inlining methods, since the bblocks
1459 * in the inlined methods do not inherit their in_stack from
1460 * the bblock they are inlined to. See bug #58863 for an
1463 if (cfg->inlined_method)
1464 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1466 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack into successors that have no in_stack yet */
1471 for (i = 0; i < bb->out_count; ++i) {
1472 outb = bb->out_bb [i];
1473 /* exception handlers are linked, but they should not be considered for stack args */
1474 if (outb->flags & BB_EXCEPTION_HANDLER)
1476 if (outb->in_scount) {
/* Stack depth mismatch at a join point: flag the method as unverifiable */
1477 if (outb->in_scount != bb->out_scount) {
1478 cfg->unverifiable = TRUE;
1481 continue; /* check they are the same locals */
1483 outb->in_scount = count;
1484 outb->in_stack = bb->out_stack;
1487 locals = bb->out_stack;
/* Emit the stores that spill the stack items into the shared locals */
1489 for (i = 0; i < count; ++i) {
1490 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1491 inst->cil_code = sp [i]->cil_code;
1492 sp [i] = locals [i];
1493 if (cfg->verbose_level > 3)
1494 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1498 * It is possible that the out bblocks already have in_stack assigned, and
1499 * the in_stacks differ. In this case, we will store to all the different
1506 /* Find a bblock which has a different in_stack */
1508 while (bindex < bb->out_count) {
1509 outb = bb->out_bb [bindex];
1510 /* exception handlers are linked, but they should not be considered for stack args */
1511 if (outb->flags & BB_EXCEPTION_HANDLER) {
1515 if (outb->in_stack != locals) {
/* Store to this successor's distinct in_stack variables as well */
1516 for (i = 0; i < count; ++i) {
1517 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1518 inst->cil_code = sp [i]->cil_code;
1519 sp [i] = locals [i];
1520 if (cfg->verbose_level > 3)
1521 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1523 locals = outb->in_stack;
1533 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1537 if (cfg->compile_aot) {
1538 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1544 ji.type = patch_type;
1545 ji.data.target = data;
1546 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1547 mono_error_assert_ok (&error);
1549 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which tests the interface bitmap found at [base_reg + offset]
 * and leaves a nonzero value in intf_bit_reg iff the bit for KLASS's
 * interface id is set.  With COMPRESSED_INTERFACE_BITMAP the test is done
 * via the mono_class_interface_match icall; otherwise the byte is loaded
 * and masked inline.  NOTE(review): the #else/#endif lines of the #ifdef
 * are elided in this chunk.
 */
1555 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1557 int ibitmap_reg = alloc_preg (cfg);
1558 #ifdef COMPRESSED_INTERFACE_BITMAP
1560 MonoInst *res, *ins;
1561 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1562 MONO_ADD_INS (cfg->cbb, ins);
1564 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1565 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1566 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1568 int ibitmap_byte_reg = alloc_preg (cfg);
1570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* AOT: the interface id is unknown at compile time, compute byte/bit at runtime */
1572 if (cfg->compile_aot) {
1573 int iid_reg = alloc_preg (cfg);
1574 int shifted_iid_reg = alloc_preg (cfg);
1575 int ibitmap_byte_address_reg = alloc_preg (cfg);
1576 int masked_iid_reg = alloc_preg (cfg);
1577 int iid_one_bit_reg = alloc_preg (cfg);
1578 int iid_bit_reg = alloc_preg (cfg);
1579 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3 */
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1581 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask = 1 << (iid & 7) */
1583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1584 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1585 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1586 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the interface id is a compile-time constant, fold byte index and mask */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1595  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1596  * stored in "klass_reg" implements the interface "klass".
1599 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Delegate to the bitmap check using MonoClass's interface_bitmap field */
1601 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1605  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1606  * stored in "vtable_reg" implements the interface "klass".
1609 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Delegate to the bitmap check using MonoVTable's interface_bitmap field */
1611 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1615  * Emit code which checks whether the interface id of @klass is smaller
1616  * than the value given by max_iid_reg.
1619 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1620 MonoBasicBlock *false_target)
/* AOT: the interface id is only known at load time, compare against a patched constant */
1622 if (cfg->compile_aot) {
1623 int iid_reg = alloc_preg (cfg);
1624 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1625 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target: branch there on failure; otherwise throw InvalidCastException */
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1632 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1635 /* Same as above, but obtains max_iid from a vtable */
1637 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1638 MonoBasicBlock *false_target)
1640 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable::max_interface_id, then run the generic max-iid check */
1642 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1643 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1646 /* Same as above, but obtains max_iid from a klass */
1648 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1649 MonoBasicBlock *false_target)
1651 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass::max_interface_id, then run the generic max-iid check */
1653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1654 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test of the class in klass_reg against
 * KLASS (or against the runtime class in KLASS_INS when non-NULL), using
 * the supertypes table: load supertypes[klass->idepth - 1] and compare it
 * against the target class.  Branches to true_target on a match and to
 * false_target when the idepth check fails.
 */
1658 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1660 int idepth_reg = alloc_preg (cfg);
1661 int stypes_reg = alloc_preg (cfg);
1662 int stype = alloc_preg (cfg);
/* Ensure klass->supertypes/idepth are initialized before reading them */
1664 mono_class_setup_supertypes (klass);
/* Deep hierarchies: must verify the candidate's idepth covers the slot we index */
1666 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1667 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1669 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime class (klass_ins), an AOT class const, or an immediate */
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1675 } else if (cfg->compile_aot) {
1676 int const_reg = alloc_preg (cfg);
1677 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1678 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test against a compile-time class only (no runtime class inst) */
1686 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1688 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit a cast of the vtable in vtable_reg to the interface KLASS:
 * max-iid range check followed by an interface-bitmap test.  Branches to
 * true_target on success when one is given, otherwise throws
 * InvalidCastException on failure.
 */
1692 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1694 int intf_reg = alloc_preg (cfg);
1696 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1697 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1702 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1706  * Variant of the above that takes a register to the class, not the vtable.
1709 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1711 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but reads the MonoClass fields directly */
1713 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1714 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of klass_reg against KLASS (or the
 * runtime class in KLASS_INST when non-NULL), throwing
 * InvalidCastException on mismatch.
 */
1723 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1726 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1728 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1731 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time class only */
1735 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1737 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare klass_reg against KLASS and branch to TARGET using BRANCH_OP.
 * Under AOT the class pointer comes from a patched class constant.
 */
1741 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1743 if (cfg->compile_aot) {
1744 int const_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1746 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1750 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass is mutually recursive with
 * mini_emit_castclass_inst below (arrays of arrays). */
1754 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in klass_reg against KLASS,
 * throwing InvalidCastException on failure.  Arrays are checked by rank
 * plus a recursive check on the element (cast) class; non-arrays use the
 * supertypes-table check.  NOTE(review): some braces/else lines are elided
 * in this chunk.
 */
1757 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1760 int rank_reg = alloc_preg (cfg);
1761 int eclass_reg = alloc_preg (cfg);
/* The shared-instance (klass_inst) case is not supported on the array path */
1763 g_assert (!klass_inst);
/* Rank must match exactly */
1764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1766 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1767 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes whose cast compatibility involves enums */
1769 if (klass->cast_class == mono_defaults.object_class) {
1770 int parent_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1772 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1773 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1774 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1775 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1776 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1777 } else if (klass->cast_class == mono_defaults.enum_class) {
1778 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1779 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1780 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1782 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1783 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1786 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1787 /* Check that the object is a vector too */
1788 int bounds_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1791 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check */
1794 int idepth_reg = alloc_preg (cfg);
1795 int stypes_reg = alloc_preg (cfg);
1796 int stype = alloc_preg (cfg);
1798 mono_class_setup_supertypes (klass);
1800 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1802 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1803 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1807 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass against a compile-time class only */
1812 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1814 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR which zeroes SIZE bytes at [destreg + offset].  Only
 * VAL == 0 is supported.  Small aligned sizes become a single immediate
 * store; larger regions are filled from a zeroed register in descending
 * power-of-two chunks.  NOTE(review): the switch labels and chunking loops
 * are elided in this chunk.
 */
1818 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1822 g_assert (val == 0);
/* Fast path: a single store covers the whole (aligned) region */
1827 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1830 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1833 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1838 #if SIZEOF_REGISTER == 8
1840 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the zero once, then store it in chunks */
1846 val_reg = alloc_preg (cfg);
1848 if (SIZEOF_REGISTER == 8)
1849 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1851 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until aligned */
1854 /* This could be optimized further if necessary */
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1863 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores */
1877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], in descending power-of-two chunks (8/4/2/1 bytes)
 * subject to alignment and backend unaligned-access support.
 * NOTE(review): the chunking loops and offset updates are elided in this
 * chunk; each load/store pair below is one chunk width.
 */
1894 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1901 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1902 g_assert (size < 10000);
/* Unaligned prefix: byte copies until aligned */
1905 /* This could be optimized further if necessary */
1907 cur_reg = alloc_preg (cfg);
1908 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1916 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that tolerate unaligned access */
1918 cur_reg = alloc_preg (cfg);
1919 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1920 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks */
1928 cur_reg = alloc_preg (cfg);
1929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunks */
1936 cur_reg = alloc_preg (cfg);
1937 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Trailing bytes */
1944 cur_reg = alloc_preg (cfg);
1945 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1946 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 * Under AOT the slot offset is a patched constant (OP_TLS_SET_REG);
 * under the JIT it is resolved now via mini_get_tls_offset (OP_TLS_SET).
 */
1954 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1958 if (cfg->compile_aot) {
1959 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1960 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1962 ins->sreg2 = c->dreg;
1963 MONO_ADD_INS (cfg->cbb, ins);
1965 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1967 ins->inst_offset = mini_get_tls_offset (tls_key);
1968 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 *   Emit IR linking this method's LMF (cfg->lmf_var) onto the per-thread
 * LMF stack.  Several strategies exist depending on TLS support:
 * store the LMF pointer directly in TLS, or compute/cache lmf_addr in
 * cfg->lmf_addr_var and link through it.  NOTE(review): the #ifdef arms
 * selecting between the strategies are elided in this chunk.
 */
1975  * Emit IR to push the current LMF onto the LMF stack.
1978 emit_push_lmf (MonoCompile *cfg)
1981 * Emit IR to push the LMF:
1982 * lmf_addr = <lmf_addr from tls>
1983 * lmf->lmf_addr = lmf_addr
1984 * lmf->prev_lmf = *lmf_addr
1987 int lmf_reg, prev_lmf_reg;
1988 MonoInst *ins, *lmf_ins;
/* Fast path: the whole MonoLMF lives in a TLS slot */
1993 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1994 /* Load current lmf */
1995 lmf_ins = mono_get_lmf_intrinsic (cfg);
1997 MONO_ADD_INS (cfg->cbb, lmf_ins);
1998 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1999 lmf_reg = ins->dreg;
2000 /* Save previous_lmf */
2001 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2003 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2006 * Store lmf_addr in a variable, so it can be allocated to a global register.
2008 if (!cfg->lmf_addr_var)
2009 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf, computed from the JIT TLS intrinsic */
2012 ins = mono_get_jit_tls_intrinsic (cfg);
2014 int jit_tls_dreg = ins->dreg;
2016 MONO_ADD_INS (cfg->cbb, ins);
2017 lmf_reg = alloc_preg (cfg);
2018 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: call the mono_get_lmf_addr icall */
2020 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2023 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2025 MONO_ADD_INS (cfg->cbb, lmf_ins);
2028 MonoInst *args [16], *jit_tls_ins, *ins;
2030 /* Inline mono_get_lmf_addr () */
2031 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2033 /* Load mono_jit_tls_id */
2034 if (cfg->compile_aot)
2035 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2037 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2038 /* call pthread_getspecific () */
2039 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2040 /* lmf_addr = &jit_tls->lmf */
2041 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2044 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2048 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link the new LMF: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf */
2050 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2051 lmf_reg = ins->dreg;
2053 prev_lmf_reg = alloc_preg (cfg);
2054 /* Save previous_lmf */
2055 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2056 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2058 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 *
 *   Emit IR unlinking this method's LMF from the per-thread LMF stack,
 * the inverse of emit_push_lmf ().
 */
2065  * Emit IR to pop the current LMF from the LMF stack.
2068 emit_pop_lmf (MonoCompile *cfg)
2070 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2076 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2077 lmf_reg = ins->dreg;
/* Fast path: restore the TLS LMF slot directly from lmf->previous_lmf */
2079 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2080 /* Load previous_lmf */
2081 prev_lmf_reg = alloc_preg (cfg);
2082 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2084 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2087 * Emit IR to pop the LMF:
2088 * *(lmf->lmf_addr) = lmf->prev_lmf
2090 /* This could be called before emit_push_lmf () */
2091 if (!cfg->lmf_addr_var)
2092 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2093 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2095 prev_lmf_reg = alloc_preg (cfg);
2096 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2097 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2102 emit_instrumentation_call (MonoCompile *cfg, void *func)
2104 MonoInst *iargs [1];
2107 * Avoid instrumenting inlined methods since it can
2108 * distort profiling results.
2110 if (cfg->method != cfg->current_method)
2113 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2114 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2115 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Select the call opcode for a call returning TYPE: *_REG when CALLI,
 * *_MEMBASE when VIRT, plain otherwise.  The opcode family (VOIDCALL/CALL/
 * LCALL/RCALL/FCALL/VCALL) follows the lowered return type.  NOTE(review):
 * many case labels are elided in this chunk.
 */
2120 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
/* Lower enums/typedefs first so the switch sees the underlying type */
2123 type = mini_get_underlying_type (type);
2124 switch (type->type) {
2125 case MONO_TYPE_VOID:
2126 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2133 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2137 case MONO_TYPE_FNPTR:
2138 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2139 case MONO_TYPE_CLASS:
2140 case MONO_TYPE_STRING:
2141 case MONO_TYPE_OBJECT:
2142 case MONO_TYPE_SZARRAY:
2143 case MONO_TYPE_ARRAY:
2144 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2147 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2150 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2152 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2154 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2155 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their base type */
2156 if (type->data.klass->enumtype) {
2157 type = mono_class_enum_basetype (type->data.klass);
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2161 case MONO_TYPE_TYPEDBYREF:
2162 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2163 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type */
2164 type = &type->data.generic_class->container_class->byval_arg;
2167 case MONO_TYPE_MVAR:
2169 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2171 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* True when t is a primitive scalar: BOOLEAN..U8 or native I/U.
 * Excludes R4/R8 and, as the XXX notes, does not consider t->byref. */
2176 //XXX this ignores if t is byref
2177 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2180 * target_type_is_incompatible:
2181 * @cfg: MonoCompile context
2183 * Check that the item @arg on the evaluation stack can be stored
2184 * in the target type (can be a local, or field, etc).
2185 * The cfg arg can be used to check if we need verification or just
2188 * Returns: non-0 value if arg can't be stored on a target.
2191 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2193 MonoType *simple_type;
2196 if (target->byref) {
2197 /* FIXME: check that the pointed to types match */
2198 if (arg->type == STACK_MP) {
2199 if (cfg->verbose_level) printf ("ok\n");
2200 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2201 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2202 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2204 /* if the target is native int& or same type */
2205 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2208 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2209 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2210 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2214 if (arg->type == STACK_PTR)
2219 simple_type = mini_get_underlying_type (target);
2220 switch (simple_type->type) {
2221 case MONO_TYPE_VOID:
2229 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2233 /* STACK_MP is needed when setting pinned locals */
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2239 case MONO_TYPE_FNPTR:
2241 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2242 * in native int. (#688008).
2244 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2247 case MONO_TYPE_CLASS:
2248 case MONO_TYPE_STRING:
2249 case MONO_TYPE_OBJECT:
2250 case MONO_TYPE_SZARRAY:
2251 case MONO_TYPE_ARRAY:
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
2258 if (arg->type != STACK_I8)
2262 if (arg->type != cfg->r4_stack_type)
2266 if (arg->type != STACK_R8)
2269 case MONO_TYPE_VALUETYPE:
2270 if (arg->type != STACK_VTYPE)
2272 klass = mono_class_from_mono_type (simple_type);
2273 if (klass != arg->klass)
2276 case MONO_TYPE_TYPEDBYREF:
2277 if (arg->type != STACK_VTYPE)
2279 klass = mono_class_from_mono_type (simple_type);
2280 if (klass != arg->klass)
2283 case MONO_TYPE_GENERICINST:
2284 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2285 MonoClass *target_class;
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 target_class = mono_class_from_mono_type (target);
2290 /* The second cases is needed when doing partial sharing */
2291 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2295 if (arg->type != STACK_OBJ)
2297 /* FIXME: check type compatibility */
2301 case MONO_TYPE_MVAR:
2302 g_assert (cfg->gshared);
2303 if (mini_type_var_is_vt (simple_type)) {
2304 if (arg->type != STACK_VTYPE)
2307 if (arg->type != STACK_OBJ)
2312 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *
 *   Verify that the evaluation-stack types in ARGS are compatible with the
 * parameter types of SIG.  Returns non-zero on mismatch.  NOTE(review):
 * many case labels, the hasthis check header and the return statements are
 * elided in this chunk.
 */
2318  * Prepare arguments for passing to a function call.
2319  * Return a non-zero value if the arguments can't be passed to the given
2321  * The type checks are not yet complete and some conversions may need
2322  * casts on 32 or 64 bit architectures.
2324  * FIXME: implement this using target_type_is_incompatible ()
2327 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2329 MonoType *simple_type;
/* 'this' must be an object reference, managed pointer or native pointer */
2333 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2337 for (i = 0; i < sig->param_count; ++i) {
2338 if (sig->params [i]->byref) {
2339 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2343 simple_type = mini_get_underlying_type (sig->params [i]);
2345 switch (simple_type->type) {
2346 case MONO_TYPE_VOID:
2355 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2361 case MONO_TYPE_FNPTR:
2362 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_STRING:
2367 case MONO_TYPE_OBJECT:
2368 case MONO_TYPE_SZARRAY:
2369 case MONO_TYPE_ARRAY:
2370 if (args [i]->type != STACK_OBJ)
2375 if (args [i]->type != STACK_I8)
2379 if (args [i]->type != cfg->r4_stack_type)
2383 if (args [i]->type != STACK_R8)
2386 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their base type */
2387 if (simple_type->data.klass->enumtype) {
2388 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2391 if (args [i]->type != STACK_VTYPE)
2394 case MONO_TYPE_TYPEDBYREF:
2395 if (args [i]->type != STACK_VTYPE)
2398 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type */
2399 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2402 case MONO_TYPE_MVAR:
2404 if (args [i]->type != STACK_VTYPE)
2408 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *CALL_MEMBASE opcode to the corresponding direct-call opcode.
 * NOTE(review): the per-case return statements are elided in this chunk.
 */
2416 callvirt_to_call (int opcode)
2419 case OP_CALL_MEMBASE:
2421 case OP_VOIDCALL_MEMBASE:
2423 case OP_FCALL_MEMBASE:
2425 case OP_RCALL_MEMBASE:
2427 case OP_VCALL_MEMBASE:
2429 case OP_LCALL_MEMBASE:
2432 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 *   Map a *CALL_MEMBASE opcode to the corresponding indirect-call (*_REG)
 * opcode.
 */
2439 callvirt_to_call_reg (int opcode)
2442 case OP_CALL_MEMBASE:
2444 case OP_VOIDCALL_MEMBASE:
2445 return OP_VOIDCALL_REG;
2446 case OP_FCALL_MEMBASE:
2447 return OP_FCALL_REG;
2448 case OP_RCALL_MEMBASE:
2449 return OP_RCALL_REG;
2450 case OP_VCALL_MEMBASE:
2451 return OP_VCALL_REG;
2452 case OP_LCALL_MEMBASE:
2453 return OP_LCALL_REG;
2455 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 *   Load the IMT argument (either a concrete METHOD constant or the dynamic
 * IMT_ARG vreg) into the architecture's IMT register for CALL.
 * NOTE(review): the #ifdef MONO_ARCH_IMT_REG / LLVM structure and the
 * 'if (imt_arg) ... else ...' branches are elided from this chunk — the two
 * nearly identical halves below are the LLVM and non-LLVM paths.
 */
2461 /* Either METHOD or IMT_ARG needs to be set */
2463 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2467 if (COMPILE_LLVM (cfg)) {
/* dynamic IMT argument: copy it into a fresh pointer-sized vreg */
2469 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* static case: materialize METHOD as a runtime constant (AOT-safe) */
2472 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2473 method_reg = ins->dreg;
2477 call->imt_arg_reg = method_reg;
2479 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same logic, but the reg is only attached as an outarg reg */
2484 method_reg = alloc_preg (cfg);
2485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2487 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2488 method_reg = ins->dreg;
2491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2494 static MonoJumpInfo *
2495 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2497 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2501 ji->data.target = target;
2507 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2510 return mono_class_check_context_used (klass);
2516 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2519 return mono_method_check_context_used (method);
2525 * check_method_sharing:
2527 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (both optional): *OUT_PASS_VTABLE — pass the class vtable as the
 * hidden rgctx argument; *OUT_PASS_MRGCTX — pass a method rgctx instead
 * (generic methods with a method_inst context).  At most one is set
 * (see the g_assert below).
 */
2530 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2532 gboolean pass_vtable = FALSE;
2533 gboolean pass_mrgctx = FALSE;
/* static methods and valuetype methods of generic classes may need the vtable */
2535 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2536 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2537 gboolean sharable = FALSE;
2539 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
/* a method_inst context means a method rgctx is used instead of the vtable */
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
/* NOTE(review): gsharedvt condition — the branch body is elided here */
2560 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2565 if (out_pass_vtable)
2566 *out_pass_vtable = pass_vtable;
2567 if (out_pass_mrgctx)
2568 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create and initialize a MonoCallInst for a call with signature SIG and
 * arguments ARGS.  CALLI selects an indirect call, VIRTUAL_ a vtable call,
 * TAIL a tail call, RGCTX whether an rgctx argument is passed, and
 * UNBOX_TRAMPOLINE whether the target must be reached through an unbox
 * trampoline.  The caller adds the returned instruction to a bblock.
 * NOTE(review): this chunk is elided — #ifdef arms, 'else' branches and the
 * final 'return call;' are not visible.
 */
2571 inline static MonoCallInst *
2572 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2573 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2577 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify the profiler of the method leave before transferring control */
2585 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2587 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
/* normal calls: pick the call opcode from the (underlying) return type */
2589 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2592 call->signature = sig;
2593 call->rgctx_reg = rgctx;
2594 sig_ret = mini_get_underlying_type (sig->ret);
2596 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): this first vtype branch is presumably guarded by an elided
   condition (e.g. cfg->vret_addr / llvm), given the identical 'else if' below. */
2599 if (mini_type_is_vtype (sig_ret)) {
2600 call->vret_var = cfg->vret_addr;
2601 //g_assert_not_reached ();
2603 } else if (mini_type_is_vtype (sig_ret)) {
/* vtype return without a vret_addr: return into a new local through OP_OUTARG_VTRETADDR */
2604 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2607 temp->backend.is_pinvoke = sig->pinvoke;
2610 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2611 * address of return value to increase optimization opportunities.
2612 * Before vtype decomposition, the dreg of the call ins itself represents the
2613 * fact the call modifies the return value. After decomposition, the call will
2614 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2615 * will be transformed into an LDADDR.
2617 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2618 loada->dreg = alloc_preg (cfg);
2619 loada->inst_p0 = temp;
2620 /* We reference the call too since call->dreg could change during optimization */
2621 loada->inst_p1 = call;
2622 MONO_ADD_INS (cfg->cbb, loada);
2624 call->inst.dreg = temp->dreg;
2626 call->vret_var = loada;
2627 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2628 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2630 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2631 if (COMPILE_SOFT_FLOAT (cfg)) {
2633 * If the call has a float argument, we would need to do an r8->r4 conversion using
2634 * an icall, but that cannot be done during the call sequence since it would clobber
2635 * the call registers + the stack. So we do it before emitting the call.
2637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2639 MonoInst *in = call->args [i];
2641 if (i >= sig->hasthis)
2642 t = sig->params [i - sig->hasthis];
/* the 'this' argument is treated as a native int */
2644 t = &mono_defaults.int_class->byval_arg;
2645 t = mono_type_get_underlying_type (t);
2647 if (!t->byref && t->type == MONO_TYPE_R4) {
2648 MonoInst *iargs [1];
2652 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2654 /* The result will be in an int vreg */
2655 call->args [i] = conv;
2661 call->need_unbox_trampoline = unbox_trampoline;
/* backend-specific argument marshalling (LLVM vs native JIT) */
2664 if (COMPILE_LLVM (cfg))
2665 mono_llvm_emit_call (cfg, call);
2667 mono_arch_emit_call (cfg, call);
2669 mono_arch_emit_call (cfg, call);
/* record the worst-case outgoing parameter area for the prolog */
2672 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2673 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value held in RGCTX_REG to CALL as the hidden rgctx
 * argument, routed through the architecture's dedicated rgctx register.
 * NOTE(review): one line between 2683 and 2685 is elided — likely a guard
 * (e.g. 'if (rgctx_arg)') around the imt_arg_reg-style assignment; confirm
 * against the full source.
 */
2679 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2682 cfg->uses_rgctx_reg = TRUE;
2683 call->rgctx_reg = TRUE;
2685 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG, when non-NULL, are passed through the IMT and rgctx
 * registers respectively.  When compiling a pinvoke wrapper with callconv
 * checking enabled, the stack pointer is sampled before and after the call
 * and an ExecutionEngineException is raised on imbalance.
 */
2689 inline static MonoInst*
2690 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2695 gboolean check_sp = FALSE;
2697 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2698 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2700 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* rgctx value is copied to a scratch vreg so it survives until the call */
2705 rgctx_reg = mono_alloc_preg (cfg);
2706 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* record the pre-call SP in a shared local for the post-call comparison */
2710 if (!cfg->stack_inbalance_var)
2711 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2713 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 ins->dreg = cfg->stack_inbalance_var->dreg;
2715 MONO_ADD_INS (cfg->cbb, ins);
2718 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* the callee address is the first source of the indirect call */
2720 call->inst.sreg1 = addr->dreg;
2723 emit_imt_argument (cfg, call, NULL, imt_arg);
2725 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* post-call: re-read SP and compare with the saved value */
2730 sp_reg = mono_alloc_preg (cfg);
2732 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2734 MONO_ADD_INS (cfg->cbb, ins);
2736 /* Restore the stack so we don't crash when throwing the exception */
2737 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2738 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2739 MONO_ADD_INS (cfg->cbb, ins);
2741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2742 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2746 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2748 return (MonoInst*)call;
2752 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2755 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2757 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit IR for a (possibly virtual, possibly tail) call to METHOD with
 * signature SIG and arguments ARGS.  THIS_INS non-NULL selects virtual
 * dispatch; IMT_ARG/RGCTX_ARG supply the hidden IMT and rgctx arguments.
 * Handles remoting proxies, delegate Invoke fast paths, static dispatch of
 * sealed/non-virtual targets, IMT interface dispatch and vtable dispatch.
 * NOTE(review): chunk is elided — several #ifdef/else arms, local decls and
 * braces are missing from view.
 */
2760 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2761 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2763 #ifndef DISABLE_REMOTING
2764 gboolean might_be_remote = FALSE;
2766 gboolean virtual_ = this_ins != NULL;
2767 gboolean enable_for_aot = TRUE;
2770 MonoInst *call_target = NULL;
2772 gboolean need_unbox_trampoline;
2775 sig = mono_method_signature (method);
/* llvm-only interface calls must have been lowered earlier */
2777 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2778 g_assert_not_reached ();
2781 rgctx_reg = mono_alloc_preg (cfg);
2782 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2785 if (method->string_ctor) {
2786 /* Create the real signature */
2787 /* FIXME: Cache these */
2788 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2789 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2794 context_used = mini_method_check_context_used (cfg, method);
2796 #ifndef DISABLE_REMOTING
2797 might_be_remote = this_ins && sig->hasthis &&
2798 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2799 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2801 if (might_be_remote && context_used) {
2804 g_assert (cfg->gshared);
/* under gshared, dispatch remoting through an rgctx-resolved address */
2806 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2808 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2812 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2813 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this_ins;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this_ins->dreg;
/* delegate Invoke fast path: call through delegate->invoke_impl */
2835 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2894 } else if (call_target) {
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2898 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2899 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '&= !MONO_INST_HAS_METHOD' uses logical NOT — this clears
   MORE bits than intended (result is 0 or 1, not a mask); '~' is almost
   certainly what is meant here. Flagging rather than fixing since this
   block is too elided for a safe rewrite. */
2900 call->inst.flags &= !MONO_INST_HAS_METHOD;
2902 vtable_reg = alloc_preg (cfg);
2903 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2904 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* interface call: IMT slots live at negative offsets from the vtable */
2905 guint32 imt_slot = mono_method_get_imt_slot (method);
2906 emit_imt_argument (cfg, call, call->method, imt_arg);
2907 slot_reg = vtable_reg;
2908 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* class call: regular vtable slot at a positive offset */
2910 slot_reg = vtable_reg;
2911 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2912 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* generic virtual methods also pass an IMT argument */
2914 g_assert (mono_method_signature (method)->generic_param_count);
2915 emit_imt_argument (cfg, call, call->method, imt_arg);
2919 call->inst.sreg1 = slot_reg;
2920 call->inst.inst_offset = offset;
2921 call->is_virtual = TRUE;
2925 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2928 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2930 return (MonoInst*)call;
2934 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2936 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2940 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2947 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2950 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2952 return (MonoInst*)call;
2956 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2958 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2962 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2966 * mono_emit_abs_call:
2968 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2970 inline static MonoInst*
2971 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2972 MonoMethodSignature *sig, MonoInst **args)
2974 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2978 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2981 if (cfg->abs_patches == NULL)
2982 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2983 g_hash_table_insert (cfg->abs_patches, ji, ji);
2984 ins = mono_emit_native_call (cfg, ji, sig, args);
2985 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2989 static MonoMethodSignature*
2990 sig_to_rgctx_sig (MonoMethodSignature *sig)
2992 // FIXME: memory allocation
2993 MonoMethodSignature *res;
2996 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2997 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2998 res->param_count = sig->param_count + 1;
2999 for (i = 0; i < sig->param_count; ++i)
3000 res->params [i] = sig->params [i];
3001 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3005 /* Make an indirect call to FSIG passing an additional argument */
3007 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3009 MonoMethodSignature *csig;
3010 MonoInst *args_buf [16];
3012 int i, pindex, tmp_reg;
3014 /* Make a call with an rgctx/extra arg */
3015 if (fsig->param_count + 2 < 16)
3018 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3021 args [pindex ++] = orig_args [0];
3022 for (i = 0; i < fsig->param_count; ++i)
3023 args [pindex ++] = orig_args [fsig->hasthis + i];
3024 tmp_reg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3026 csig = sig_to_rgctx_sig (fsig);
3027 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3030 /* Emit an indirect call to the function descriptor ADDR */
3032 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3034 int addr_reg, arg_reg;
3035 MonoInst *call_target;
3037 g_assert (cfg->llvm_only);
3040 * addr points to a <addr, arg> pair, load both of them, and
3041 * make a call to addr, passing arg as an extra arg.
3043 addr_reg = alloc_preg (cfg);
3044 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3045 arg_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3048 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
3052 direct_icalls_enabled (MonoCompile *cfg)
3054 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3056 if (cfg->compile_llvm && !cfg->llvm_only)
3059 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall INFO.  When the icall cannot raise and
 * direct icalls are enabled, the (lazily created) icall wrapper is inlined
 * instead of being called, avoiding the wrapper call overhead; otherwise a
 * normal native call through the wrapper is emitted.
 * NOTE(review): chunk is elided — locals, braces and the inlined-path return
 * are not visible here.
 */
3065 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3068 * Call the jit icall without a wrapper if possible.
3069 * The wrapper is needed for the following reasons:
3070 * - to handle exceptions thrown using mono_raise_exceptions () from the
3071 * icall function. The EH code needs the lmf frame pushed by the
3072 * wrapper to be able to unwind back to managed code.
3073 * - to be able to do stack walks for asynchronously suspended
3074 * threads when debugging.
3076 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* lazily create the wrapper once; published with a memory barrier */
3080 if (!info->wrapper_method) {
3081 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3082 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3084 mono_memory_barrier ();
3088 * Inline the wrapper method, which is basically a call to the C icall, and
3089 * an exception check.
3091 costs = inline_method (cfg, info->wrapper_method, NULL,
3092 args, NULL, il_offset, TRUE);
3093 g_assert (costs > 0);
3094 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* fallback: regular call through the icall wrapper */
3098 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the return type
 * is a sub-register-sized integer: native (pinvoke) and LLVM callees may
 * leave the upper bits uninitialized, so an explicit sign/zero extension is
 * emitted.  NOTE(review): elided — the 'break's, the default case and the
 * 'return ins;' paths are not visible.
 */
3103 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3105 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3106 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3110 * Native code might return non register sized integers
3111 * without initializing the upper bits.
3113 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3114 case OP_LOADI1_MEMBASE:
3115 widen_op = OP_ICONV_TO_I1;
3117 case OP_LOADU1_MEMBASE:
3118 widen_op = OP_ICONV_TO_U1;
3120 case OP_LOADI2_MEMBASE:
3121 widen_op = OP_ICONV_TO_I2;
3123 case OP_LOADU2_MEMBASE:
3124 widen_op = OP_ICONV_TO_U2;
3130 if (widen_op != -1) {
3131 int dreg = alloc_preg (cfg);
3134 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* the widened value keeps the original eval-stack type */
3135 widen->type = ins->type;
3146 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3148 char *caller_fname = mono_method_full_name (method, TRUE);
3149 char *callee_fname = mono_method_full_name (cil_method, TRUE);
3150 MonoInst *args [16];
3153 n1 = mono_domain_alloc (cfg->domain, strlen (caller_fname) + 1);
3154 strcmp (n1, caller_fname);
3155 n2 = mono_domain_alloc (cfg->domain, strlen (callee_fname) + 1);
3156 strcmp (n1, callee_fname);
3157 g_free (caller_fname);
3158 g_free (callee_fname);
3160 EMIT_NEW_PCONST (cfg, args [0], n1);
3161 EMIT_NEW_PCONST (cfg, args [1], n2);
3163 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
3167 get_memcpy_method (void)
3169 static MonoMethod *memcpy_method = NULL;
3170 if (!memcpy_method) {
3171 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3173 g_error ("Old corlib found. Install a new one");
3175 return memcpy_method;
3179 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3181 MonoClassField *field;
3182 gpointer iter = NULL;
3184 while ((field = mono_class_get_fields (klass, &iter))) {
3187 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3189 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3190 if (mini_type_is_reference (mono_field_get_type (field))) {
3191 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3192 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3194 MonoClass *field_class = mono_class_from_mono_type (field->type);
3195 if (field_class->has_references)
3196 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.  Picks the
 * cheapest available strategy: a backend card-table opcode, inline
 * card-table marking, or a call to the GC's managed write barrier method.
 * No-op when write barriers are disabled for this compilation.
 * NOTE(review): elided — some locals and an early 'return' are not visible.
 */
3202 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3204 int card_table_shift_bits;
3205 gpointer card_table_mask;
3207 MonoInst *dummy_use;
3208 int nursery_shift_bits;
3209 size_t nursery_size;
3211 if (!cfg->gen_write_barriers)
3214 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3216 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* 1) dedicated backend opcode, when available and not AOT/LLVM */
3218 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3221 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3222 wbarrier->sreg1 = ptr->dreg;
3223 wbarrier->sreg2 = value->dreg;
3224 MONO_ADD_INS (cfg->cbb, wbarrier);
/* 2) inline card marking: card = card_table + (ptr >> shift); *card = 1 */
3225 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3226 int offset_reg = alloc_preg (cfg);
3230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3231 if (card_table_mask)
3232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3234 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3235 * IMM's larger than 32bits.
3237 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3238 card_reg = ins->dreg;
3240 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3241 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* 3) fallback: call the GC-provided managed write barrier */
3243 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3244 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3247 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0].  Gives up (presumably returning FALSE — the
 * returns are elided) on small alignment, very large types, or falls back
 * to the mono_gc_wbarrier_value_copy_bitmap icall for copies larger than 5
 * pointer words.  Otherwise unrolls pointer-sized load/store pairs with
 * barriers where the precomputed bitmap marks reference slots, then copies
 * the sub-pointer-sized tail with 4/2/1-byte moves.
 */
3251 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3253 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3254 unsigned need_wb = 0;
3259 /*types with references can't have alignment smaller than sizeof(void*) */
3260 if (align < SIZEOF_VOID_P)
3263 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3264 if (size > 32 * SIZEOF_VOID_P)
3267 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3269 /* We don't unroll more than 5 stores to avoid code bloat. */
3270 if (size > 5 * SIZEOF_VOID_P) {
3271 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3272 size += (SIZEOF_VOID_P - 1);
3273 size &= ~(SIZEOF_VOID_P - 1);
3275 EMIT_NEW_ICONST (cfg, iargs [2], size);
3276 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3277 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3281 destreg = iargs [0]->dreg;
3282 srcreg = iargs [1]->dreg;
3285 dest_ptr_reg = alloc_preg (cfg);
3286 tmp_reg = alloc_preg (cfg);
/* running destination pointer, advanced by one word per iteration */
3289 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3291 while (size >= SIZEOF_VOID_P) {
3292 MonoInst *load_inst;
3293 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3294 load_inst->dreg = tmp_reg;
3295 load_inst->inst_basereg = srcreg;
3296 load_inst->inst_offset = offset;
3297 MONO_ADD_INS (cfg->cbb, load_inst);
3299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for slots the bitmap marks as references (guard elided) */
3302 emit_write_barrier (cfg, iargs [0], load_inst);
3304 offset += SIZEOF_VOID_P;
3305 size -= SIZEOF_VOID_P;
3308 /*tmp += sizeof (void*)*/
3309 if (size >= SIZEOF_VOID_P) {
3310 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3311 MONO_ADD_INS (cfg->cbb, iargs [0]);
3315 /* Those cannot be references since size < sizeof (void*) */
3317 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3324 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3332 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3341 * Emit code to copy a valuetype of type @klass whose address is stored in
3342 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: for gsharedvt types the size/memcpy helpers come from the rgctx;
 * for types with references (unless NATIVE) a value_copy icall or the
 * wb-aware inline copy is used; small reference-free types are inlined via
 * mini_emit_memcpy; everything else calls the managed String.memcpy helper.
 * NOTE(review): elided — several locals, else branches and returns missing.
 */
3345 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3347 MonoInst *iargs [4];
3350 MonoMethod *memcpy_method;
3351 MonoInst *size_ins = NULL;
3352 MonoInst *memcpy_ins = NULL;
3356 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3359 * This check breaks with spilled vars... need to handle it during verification anyway.
3360 * g_assert (klass && klass == src->klass && klass == dest->klass);
3363 if (mini_is_gsharedvt_klass (klass)) {
/* size and memcpy routine are only known at runtime → fetch from rgctx */
3365 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3366 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3370 n = mono_class_native_size (klass, &align);
3372 n = mono_class_value_size (klass, &align);
3374 /* if native is true there should be no references in the struct */
3375 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3376 /* Avoid barriers when storing to the stack */
3377 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3378 (dest->opcode == OP_LDADDR))) {
3384 context_used = mini_class_check_context_used (cfg, klass);
3386 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3387 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3389 } else if (context_used) {
3390 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3392 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3393 if (!cfg->compile_aot)
3394 mono_class_compute_gc_descriptor (klass);
3398 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3400 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* reference-free path: small copies are fully inlined */
3405 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3406 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3407 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3412 iargs [2] = size_ins;
3414 EMIT_NEW_ICONST (cfg, iargs [2], n);
3416 memcpy_method = get_memcpy_method ();
/* gsharedvt: indirect call through the rgctx-provided memcpy routine */
3418 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3420 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3425 get_memset_method (void)
3427 static MonoMethod *memset_method = NULL;
3428 if (!memset_method) {
3429 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3431 g_error ("Old corlib found. Install a new one");
3433 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the valuetype KLASS at the address in
 * DEST->dreg (CIL initobj).  gsharedvt types call an rgctx-provided bzero
 * routine with a runtime size; small types are inlined with
 * mini_emit_memset; larger ones call the managed String.memset helper.
 * NOTE(review): elided — locals, braces and returns are missing from view.
 */
3437 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3439 MonoInst *iargs [3];
3442 MonoMethod *memset_method;
3443 MonoInst *size_ins = NULL;
3444 MonoInst *bzero_ins = NULL;
3445 static MonoMethod *bzero_method;
3447 /* FIXME: Optimize this for the case when dest is an LDADDR */
3448 mono_class_init (klass);
3449 if (mini_is_gsharedvt_klass (klass)) {
3450 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3451 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* NOTE(review): presumably cached behind an elided '!bzero_method' guard */
3453 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3454 g_assert (bzero_method);
3456 iargs [1] = size_ins;
3457 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3461 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3463 n = mono_class_value_size (klass, &align);
/* small types: inline the memset */
3465 if (n <= sizeof (gpointer) * 8) {
3466 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3469 memset_method = get_memset_method ();
3471 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3472 EMIT_NEW_ICONST (cfg, iargs [2], n);
3473 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3480 * Emit IR to return either the this pointer for instance method,
3481 * or the mrgctx for static methods.
/*
 * For instance methods the rgctx is reached through 'this'->vtable; for
 * generic methods with a method_inst the mrgctx local is loaded; for static
 * or valuetype methods the vtable local is loaded (dereferencing the mrgctx's
 * class_vtable field when needed).  NOTE(review): elided — returns and some
 * branch bodies are not visible.
 */
3484 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3486 MonoInst *this_ins = NULL;
3488 g_assert (cfg->gshared);
/* instance methods of reference types: the rgctx comes from 'this' */
3490 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3491 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3492 !method->klass->valuetype)
3493 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3495 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3496 MonoInst *mrgctx_loc, *mrgctx_var;
3498 g_assert (!this_ins);
3499 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* the mrgctx is stored in the vtable variable slot */
3501 mrgctx_loc = mono_get_vtable_var (cfg);
3502 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3505 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3506 MonoInst *vtable_loc, *vtable_var;
3508 g_assert (!this_ins);
3510 vtable_loc = mono_get_vtable_var (cfg);
3511 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3513 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* the slot actually holds an mrgctx; load its class_vtable field */
3514 MonoInst *mrgctx_var = vtable_var;
3517 vtable_reg = alloc_preg (cfg);
3518 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3519 vtable_var->type = STACK_PTR;
/* default: load 'this'->vtable */
3527 vtable_reg = alloc_preg (cfg);
3528 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3533 static MonoJumpInfoRgctxEntry *
3534 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3536 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3537 res->method = method;
3538 res->in_mrgctx = in_mrgctx;
3539 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3540 res->data->type = patch_type;
3541 res->data->data.target = patch_data;
3542 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to look up ENTRY in the runtime generic context RGCTX.
 * The emitted fast path walks the (m)rgctx slot arrays level by level and
 * loads the slot value; whenever a level pointer or the final slot is NULL it
 * branches to a slow path that calls the mono_fill_method_rgctx /
 * mono_fill_class_rgctx JIT icalls to instantiate the slot, then both paths
 * merge in END_BB with the result in RES_REG.
 * NOTE(review): this extract is missing interior lines (branch bodies, the
 * non-inline/LLVM-only early path, returns) — confirm against the full file.
 */
3547 static inline MonoInst*
3548 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3550 MonoInst *args [16];
/* AOT/llvmonly path: the slot index is only known at load time, so pass the
 * entry as an AOT constant and always call the fill icall. */
3553 // FIXME: No fastpath since the slot is not a compile time constant
3555 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3556 if (entry->in_mrgctx)
3557 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3559 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3563 * FIXME: This can be called during decompose, which is a problem since it creates
3565 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3567 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3569 MonoBasicBlock *is_null_bb, *end_bb;
3570 MonoInst *res, *ins, *call;
/* Decode the slot into (mrgctx-vs-rgctx, index) and find the depth of the
 * array level that holds it. */
3573 slot = mini_get_rgctx_entry_slot (entry);
3575 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3576 index = MONO_RGCTX_SLOT_INDEX (slot);
/* For an MRGCTX the first entries overlay the MonoMethodRuntimeGenericContext
 * header, so skip past it. */
3578 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3579 for (depth = 0; ; ++depth) {
3580 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3582 if (index < size - 1)
3587 NEW_BBLOCK (cfg, end_bb);
3588 NEW_BBLOCK (cfg, is_null_bb);
3591 rgctx_reg = rgctx->dreg;
3593 rgctx_reg = alloc_preg (cfg);
3595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3596 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3597 NEW_BBLOCK (cfg, is_null_bb);
3599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Descend DEPTH levels of slot arrays; a NULL pointer at any level means the
 * slot is not yet instantiated, so branch to the slow path. */
3603 for (i = 0; i < depth; ++i) {
3604 int array_reg = alloc_preg (cfg);
3606 /* load ptr to next array */
3607 if (mrgctx && i == 0)
3608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3611 rgctx_reg = array_reg;
3612 /* is the ptr null? */
3613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3614 /* if yes, jump to actual trampoline */
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot itself ("+ 1" skips the array's link/header word). */
3619 val_reg = alloc_preg (cfg);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3621 /* is the slot null? */
3622 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3623 /* if yes, jump to actual trampoline */
3624 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path result: move the slot value into the common result register. */
3627 res_reg = alloc_preg (cfg);
3628 MONO_INST_NEW (cfg, ins, OP_MOVE);
3629 ins->dreg = res_reg;
3630 ins->sreg1 = val_reg;
3631 MONO_ADD_INS (cfg->cbb, ins);
3633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: call the fill icall with the slot index, then move its result
 * into RES_REG so both paths agree. */
3636 MONO_START_BB (cfg, is_null_bb);
3638 EMIT_NEW_ICONST (cfg, args [1], index);
3640 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3642 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3643 MONO_INST_NEW (cfg, ins, OP_MOVE);
3644 ins->dreg = res_reg;
3645 ins->sreg1 = call->dreg;
3646 MONO_ADD_INS (cfg->cbb, ins);
3647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3649 MONO_START_BB (cfg, end_bb);
/*
 * emit_rgctx_fetch:
 *
 *   Dispatcher: either inline the rgctx lookup (emit_rgctx_fetch_inline) or
 * emit an absolute call to the lazy-fetch trampoline for ENTRY.
 * NOTE(review): the condition selecting between the two paths is not visible
 * in this extract — presumably cfg->llvm_only or similar; verify.
 */
3658 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3661 static inline MonoInst*
3662 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3665 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3667 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE data (vtable, klass pointer, etc.) of
 * KLASS from the runtime generic context of the current method, via a
 * MONO_PATCH_INFO_CLASS rgctx entry.
 */
3671 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3672 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3674 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3675 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3677 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE data of signature SIG from the runtime
 * generic context, via a MONO_PATCH_INFO_SIGNATURE rgctx entry.
 */
3681 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3682 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3684 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3685 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3687 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch gsharedvt call information (SIG + CMETHOD pair) from the
 * runtime generic context. The MonoJumpInfoGSharedVtCall descriptor is
 * mempool-allocated and referenced by the rgctx entry.
 */
3691 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3692 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3694 MonoJumpInfoGSharedVtCall *call_info;
3695 MonoJumpInfoRgctxEntry *entry;
3698 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3699 call_info->sig = sig;
3700 call_info->method = cmethod;
3702 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3703 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3705 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * See header comment below: fetches rgctx data for the concrete method that a
 * virtual call to VIRT_METHOD resolves to on a receiver of type KLASS.
 * The MonoJumpInfoVirtMethod (klass, method) descriptor is mempool-allocated.
 */
3709 * emit_get_rgctx_virt_method:
3711 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3714 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3715 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3717 MonoJumpInfoVirtMethod *info;
3718 MonoJumpInfoRgctxEntry *entry;
3721 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3722 info->klass = klass;
3723 info->method = virt_method;
3725 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3726 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3728 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the MonoGSharedVtMethodRuntimeInfo for CMETHOD from the
 * runtime generic context (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry).
 */
3732 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3733 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3735 MonoJumpInfoRgctxEntry *entry;
3738 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3739 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3741 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * In the !context_used case only MONO_RGCTX_INFO_METHOD and
 * MONO_RGCTX_INFO_METHOD_RGCTX can be emitted as constants; any other
 * RGCTX_TYPE asserts. The shared case goes through an rgctx fetch keyed on a
 * MONO_PATCH_INFO_METHODCONST entry.
 */
3745 * emit_get_rgctx_method:
3747 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3748 * normal constants, else emit a load from the rgctx.
3751 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3752 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3754 if (!context_used) {
3757 switch (rgctx_type) {
3758 case MONO_RGCTX_INFO_METHOD:
3759 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3761 case MONO_RGCTX_INFO_METHOD_RGCTX:
3762 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3765 g_assert_not_reached ();
3768 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3769 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3771 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE data of FIELD from the runtime generic
 * context, via a MONO_PATCH_INFO_FIELD rgctx entry.
 */
3776 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3777 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3779 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3780 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3782 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template table (cfg->gsharedvt_info), registering a new
 * entry if none matches. The entries array grows by doubling (starting at
 * 16), mempool-allocated, so old arrays are simply abandoned to the mempool.
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries are never deduplicated.
 */
3786 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3788 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3789 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing slot when the same (type, data) pair was seen before. */
3794 for (i = 0; i < info->num_entries; ++i) {
3795 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3797 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full. */
3801 if (info->num_entries == info->count_entries) {
3802 MonoRuntimeGenericContextInfoTemplate *new_entries;
3803 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3805 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3807 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3808 info->entries = new_entries;
3809 info->count_entries = new_count_entries;
3812 idx = info->num_entries;
3813 template_ = &info->entries [idx];
3814 template_->info_type = rgctx_type;
3815 template_->data = data;
3817 info->num_entries ++;
/*
 * Loads entries[idx] of the MonoGSharedVtMethodRuntimeInfo held in
 * cfg->gsharedvt_info_var — a direct memory load, no trampoline call.
 */
3823 * emit_get_gsharedvt_info:
3825 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3828 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3833 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3834 /* Load info->entries [idx] */
3835 dreg = alloc_preg (cfg);
3836 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3842 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3844 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR that runs the class initializer (cctor) of KLASS if it has not
 * run yet. The vtable argument is fetched from the rgctx in shared code, or
 * emitted as a constant otherwise. Backends with OP_GENERIC_CLASS_INIT hide
 * the call inside one opcode; the generic fallback tests the "initialized"
 * bit in the vtable inline and only calls mono_generic_class_init when clear.
 */
3848 * On return the caller must check @klass for load errors.
3851 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3853 MonoInst *vtable_arg;
3856 context_used = mini_class_check_context_used (cfg, klass);
3859 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3860 klass, MONO_RGCTX_INFO_VTABLE);
3862 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3866 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3869 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3873 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3874 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3876 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3877 ins->sreg1 = vtable_arg->dreg;
3878 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline "already initialized?" bit test. byte_offset/bitmask are
 * resolved once from the MonoVTable.initialized bitfield and cached in
 * function-local statics (written without locking; racy-but-idempotent). */
3880 static int byte_offset = -1;
3881 static guint8 bitmask;
3882 int bits_reg, inited_reg;
3883 MonoBasicBlock *inited_bb;
3884 MonoInst *args [16];
3886 if (byte_offset < 0)
3887 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3889 bits_reg = alloc_ireg (cfg);
3890 inited_reg = alloc_ireg (cfg);
3892 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3895 NEW_BBLOCK (cfg, inited_bb);
3897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3900 args [0] = vtable_arg;
3901 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3903 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset (ip - header->code), but only
 * when sequence points are enabled and we are compiling METHOD itself (not an
 * inlined callee). NONEMPTY_STACK marks seq points where the IL stack is not
 * empty; INTR_LOC marks it as a potential interrupt/single-step location.
 */
3908 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3912 if (cfg->gen_seq_points && cfg->method == method) {
3913 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3915 ins->flags |= MONO_INST_NONEMPTY_STACK;
3916 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit IR that records the (from, to) classes
 * of an impending cast into the thread's MonoJitTlsData, so a failing cast
 * can produce a detailed exception message. NULL_CHECK guards the recording
 * with an obj != NULL test. No-op unless better_cast_details is enabled.
 */
3921 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3923 if (mini_get_debug_options ()->better_cast_details) {
3924 int vtable_reg = alloc_preg (cfg);
3925 int klass_reg = alloc_preg (cfg);
3926 MonoBasicBlock *is_null_bb = NULL;
3928 int to_klass_reg, context_used;
3931 NEW_BBLOCK (cfg, is_null_bb);
3933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* The details are stored in JIT TLS; bail out loudly if the platform has no
 * TLS intrinsic. */
3937 tls_get = mono_get_jit_tls_intrinsic (cfg);
3939 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3943 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = object's dynamic class (vtable->klass). */
3944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3947 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to = target class, via rgctx in shared code. */
3949 context_used = mini_class_check_context_used (cfg, klass);
3951 MonoInst *class_ins;
3953 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3954 to_klass_reg = class_ins->dreg;
3956 to_klass_reg = alloc_preg (cfg);
3957 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3962 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: clears the recorded cast info after the
 * cast succeeded. Only class_cast_from is zeroed — a zero "from" marks the
 * whole record as invalid. No-op unless better_cast_details is enabled.
 */
3967 reset_cast_details (MonoCompile *cfg)
3969 /* Reset the variables holding the cast details */
3970 if (mini_get_debug_options ()->better_cast_details) {
3971 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3973 MONO_ADD_INS (cfg->cbb, tls_get);
3974 /* It is enough to reset the from field */
3975 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that checks OBJ is exactly an instance of ARRAY_CLASS (used for
 * stelem-style array covariance checks), throwing ArrayTypeMismatchException
 * on mismatch. The comparison strategy depends on the compile mode:
 * MONO_OPT_SHARED compares klass pointers via a runtime constant, shared
 * generic code compares vtables fetched from the rgctx, AOT compares against
 * a vtable constant, and plain JIT compares against the vtable address
 * immediate.
 */
3980 * On return the caller must check @array_class for load errors
3983 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3985 int vtable_reg = alloc_preg (cfg);
3988 context_used = mini_class_check_context_used (cfg, array_class);
3990 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3992 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3994 if (cfg->opt & MONO_OPT_SHARED) {
3995 int class_reg = alloc_preg (cfg);
3998 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3999 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
4000 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
4001 } else if (context_used) {
4002 MonoInst *vtable_ins;
4004 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
4005 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
4007 if (cfg->compile_aot) {
4011 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4013 vt_reg = alloc_preg (cfg);
4014 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4015 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4018 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4024 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4026 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit IR that unboxes VAL into a Nullable<T> of class KLASS by calling the
 * managed Nullable.Unbox helper. In shared generic code the helper's address
 * comes from the rgctx (indirect call); otherwise a direct managed call is
 * emitted, passing the vtable when method sharing requires it.
 */
4030 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4031 * generic code is generated.
4034 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4036 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4039 MonoInst *rgctx, *addr;
4041 /* FIXME: What if the class is shared? We might not
4042 have to get the address of the method from the
4044 addr = emit_get_rgctx_method (cfg, context_used, method,
4045 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4046 if (cfg->llvm_only && cfg->gsharedvt) {
4047 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4049 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4051 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, possibly with an extra vtable argument. */
4054 gboolean pass_vtable, pass_mrgctx;
4055 MonoInst *rgctx_arg = NULL;
4057 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4058 g_assert (!pass_mrgctx);
4061 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4064 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4067 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the CIL `unbox` of sp[0] to value type KLASS: verify the
 * boxed object's element class matches KLASS (InvalidCastException
 * otherwise), then compute the address of the payload, which starts right
 * after the MonoObject header. Result is a STACK_MP address.
 */
4072 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4076 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4077 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4078 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4079 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4081 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4082 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4083 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4085 /* FIXME: generics */
4086 g_assert (klass->rank == 0);
/* Arrays can never unbox to a value type: rank must be 0. */
4089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4090 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare element classes via an rgctx-fetched class. */
4096 MonoInst *element_class;
4098 /* This assertion is from the unboxcast insn */
4099 g_assert (klass->rank == 0);
4101 element_class = emit_get_rgctx_klass (cfg, context_used,
4102 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4104 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4105 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4107 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4108 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4109 reset_cast_details (cfg);
/* Address of the unboxed payload: obj + sizeof (MonoObject). */
4112 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4113 MONO_ADD_INS (cfg->cbb, add);
4114 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete layout
 * is only known at runtime. The box type (value/ref/nullable) is read from
 * the gsharedvt info and dispatched over three basic blocks:
 *   - value type: address = obj + sizeof (MonoObject)
 *   - reference:  store the ref into a temporary and take its address
 *   - nullable:   call the Nullable unbox helper through an rgctx address
 * All paths merge with the payload address in ADDR_REG, then the value is
 * loaded from it.
 */
4121 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4123 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4124 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4128 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check of obj against the runtime klass. */
4134 args [1] = klass_inst;
4137 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4139 NEW_BBLOCK (cfg, is_ref_bb);
4140 NEW_BBLOCK (cfg, is_nullable_bb);
4141 NEW_BBLOCK (cfg, end_bb);
4142 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4149 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4150 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough: plain value type, payload follows the object header. */
4154 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4155 MONO_ADD_INS (cfg->cbb, addr);
4157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4160 MONO_START_BB (cfg, is_ref_bb);
4162 /* Save the ref to a temporary */
4163 dreg = alloc_ireg (cfg);
4164 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4165 addr->dreg = addr_reg;
4166 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4167 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4170 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable<T>: the Unbox helper cannot be constructed at JIT time for a
 * gsharedvt T, so a one-parameter signature is built by hand and the helper
 * address fetched from the gsharedvt info. */
4173 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4174 MonoInst *unbox_call;
4175 MonoMethodSignature *unbox_sig;
4177 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4178 unbox_sig->ret = &klass->byval_arg;
4179 unbox_sig->param_count = 1;
4180 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4183 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4185 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4187 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4188 addr->dreg = addr_reg;
4191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4194 MONO_START_BB (cfg, end_bb);
/* Load the value through the computed address. */
4197 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new instance of KLASS (FOR_BOX distinguishes box
 * allocations, which lets the GC pick a specialized managed allocator).
 * Strategy, in order: shared generic code fetches the klass/vtable from the
 * rgctx and uses a managed allocator or the generic icall; MONO_OPT_SHARED
 * uses domain+klass icalls; AOT out-of-line corlib allocations use a compact
 * mscorlib helper keyed on the type token; otherwise a vtable-constant-based
 * managed allocator or allocation function is used. Sets the cfg exception
 * and returns on vtable load failure.
 */
4203 * Returns NULL and set the cfg exception on error.
4206 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4208 MonoInst *iargs [2];
/* --- shared generic code path --- */
4213 MonoRgctxInfoType rgctx_info;
4214 MonoInst *iargs [2];
/* gsharedvt classes have an unknown instance size at JIT time, which rules
 * out the size-specialized managed allocator. */
4215 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4217 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4219 if (cfg->opt & MONO_OPT_SHARED)
4220 rgctx_info = MONO_RGCTX_INFO_KLASS;
4222 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4223 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4225 if (cfg->opt & MONO_OPT_SHARED) {
4226 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4228 alloc_ftn = ves_icall_object_new;
4231 alloc_ftn = ves_icall_object_new_specific;
4234 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4235 if (known_instance_size) {
4236 int size = mono_class_instance_size (klass);
4237 if (size < sizeof (MonoObject))
4238 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4240 EMIT_NEW_ICONST (cfg, iargs [1], size);
4242 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4245 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared paths --- */
4248 if (cfg->opt & MONO_OPT_SHARED) {
4249 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4250 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4252 alloc_ftn = ves_icall_object_new;
4253 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4254 /* This happens often in argument checking code, eg. throw new FooException... */
4255 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4256 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4257 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4259 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4260 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
4264 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4265 cfg->exception_ptr = klass;
4269 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4271 if (managed_alloc) {
4272 int size = mono_class_instance_size (klass);
4273 if (size < sizeof (MonoObject))
4274 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4276 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4277 EMIT_NEW_ICONST (cfg, iargs [1], size);
4278 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4280 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in gpointer-sized words
 * ("lw") as the first argument instead of the vtable alone. */
4282 guint32 lw = vtable->klass->instance_size;
4283 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4284 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4285 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4288 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4292 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL of type KLASS and returns the resulting object
 * reference. Nullable<T> is boxed through the managed Nullable.Box helper
 * (rgctx-addressed in shared code). gsharedvt classes dispatch at runtime on
 * the box type (value/ref/nullable) over three basic blocks, mirroring
 * handle_unbox_gsharedvt. The plain case allocates via handle_alloc and
 * stores VAL right after the object header.
 */
4296 * Returns NULL and set the cfg exception on error.
4299 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4301 MonoInst *alloc, *ins;
4303 if (mono_class_is_nullable (klass)) {
4304 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4307 if (cfg->llvm_only && cfg->gsharedvt) {
4308 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4309 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4310 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4312 /* FIXME: What if the class is shared? We might not
4313 have to get the method address from the RGCTX. */
4314 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4315 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4316 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4318 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable.Box: direct call, optional vtable argument. */
4321 gboolean pass_vtable, pass_mrgctx;
4322 MonoInst *rgctx_arg = NULL;
4324 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4325 g_assert (!pass_mrgctx);
4328 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4331 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4334 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4338 if (mini_is_gsharedvt_klass (klass)) {
4339 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4340 MonoInst *res, *is_ref, *src_var, *addr;
4343 dreg = alloc_ireg (cfg);
4345 NEW_BBLOCK (cfg, is_ref_bb);
4346 NEW_BBLOCK (cfg, is_nullable_bb);
4347 NEW_BBLOCK (cfg, end_bb);
4348 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: plain value type — allocate and store the payload. */
4356 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4359 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4360 ins->opcode = OP_STOREV_MEMBASE;
4362 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4363 res->type = STACK_OBJ;
4365 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4368 MONO_START_BB (cfg, is_ref_bb);
4370 /* val is a vtype, so has to load the value manually */
4371 src_var = get_vreg_to_inst (cfg, val->dreg);
4373 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4374 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4379 MONO_START_BB (cfg, is_nullable_bb);
4382 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4383 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4385 MonoMethodSignature *box_sig;
4388 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4389 * construct that method at JIT time, so have to do things by hand.
4391 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4392 box_sig->ret = &mono_defaults.object_class->byval_arg;
4393 box_sig->param_count = 1;
4394 box_sig->params [0] = &klass->byval_arg;
4397 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4399 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4400 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4401 res->type = STACK_OBJ;
4405 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4407 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt, non-nullable) box. */
4411 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4415 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type argument that is a
 * reference type — such classes need the slow cast paths because variance
 * makes the simple klass-pointer checks insufficient.
 */
4421 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4424 MonoGenericContainer *container;
4425 MonoGenericInst *ginst;
4427 if (klass->generic_class) {
4428 container = klass->generic_class->container_class->generic_container;
4429 ginst = klass->generic_class->context.class_inst;
4430 } else if (klass->generic_container && context_used) {
4431 container = klass->generic_container;
4432 ginst = container->context.class_inst;
4437 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters — only (co/contra)variant ones matter. */
4439 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4441 type = ginst->type_argv [i];
4442 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls never raise;
 * published with a memory barrier and then read without locking. */
4448 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall may be invoked directly (skipping the
 * wrapper), which is only safe for icalls that cannot call
 * mono_raise_exception. Currently: System.Math plus a small whitelist of
 * corlib class names.
 */
4451 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4453 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4454 if (!direct_icalls_enabled (cfg))
4458 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4459 * Whitelist a few icalls for now.
4461 if (!direct_icall_type_hash) {
4462 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4464 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4465 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4466 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4467 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Barrier before publishing so readers see a fully-populated table. */
4468 mono_memory_barrier ();
4469 direct_icall_type_hash = h;
4472 if (cmethod->klass == mono_defaults.math_class)
4474 /* No locking needed */
4475 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* A cast is "complex" (needs the slow helper / cache path) when the target is
 * an interface, array, Nullable, MarshalByRef, sealed class, or an open
 * generic parameter — cases the simple klass-pointer walk cannot decide. */
4480 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper with ARGS
 * (obj, klass, cache slot), recording cast details around the call for
 * --debug=casts diagnostics.
 */
4483 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4485 MonoMethod *mono_castclass;
4488 mono_castclass = mono_marshal_get_castclass_with_cache ();
4490 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4491 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4492 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh cache index unique to this call site: the method index in
 * the high 16 bits and a per-cfg counter in the low 16.
 */
4498 get_castclass_cache_idx (MonoCompile *cfg)
4500 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4501 cfg->castclass_cache_index ++;
4502 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code front end for emit_castclass_with_cache: builds the
 * argument array with a class constant and a per-call-site cache slot
 * (runtime constant keyed on a fresh cache index).
 */
4506 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4515 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4518 idx = get_castclass_cache_idx (cfg);
4519 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4521 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4522 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL `castclass` of SRC to KLASS, throwing
 * InvalidCastException on failure. Fast-path selection: a compile-time null
 * constant needs no check; reference-variant targets go through the cached
 * helper; MarshalByRef/interface targets (non-shared) inline the marshal
 * castclass wrapper; "complex" targets use the cast cache; everything else
 * gets inline vtable/klass comparisons, skipping the check entirely for
 * null objects.
 */
4526 * Returns NULL and set the cfg exception on error.
4529 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4531 MonoBasicBlock *is_null_bb;
4532 int obj_reg = src->dreg;
4533 int vtable_reg = alloc_preg (cfg);
4535 MonoInst *klass_inst = NULL, *res;
/* Casting a compile-time null always succeeds — nothing to emit. */
4537 if (src->opcode == OP_PCONST && src->inst_p0 == 0)
4540 context_used = mini_class_check_context_used (cfg, klass);
4542 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4543 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4544 (*inline_costs) += 2;
4546 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4547 MonoMethod *mono_castclass;
4548 MonoInst *iargs [1];
4551 mono_castclass = mono_marshal_get_castclass (klass);
4554 save_cast_details (cfg, klass, src->dreg, TRUE);
4555 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4556 iargs, ip, cfg->real_offset, TRUE);
4557 reset_cast_details (cfg);
4558 CHECK_CFG_EXCEPTION;
4559 g_assert (costs > 0);
4561 cfg->real_offset += 5;
4563 (*inline_costs) += costs;
/* Shared/complex targets: use the cast cache helper. args[1] is the cached
 * klass, stored as the second word of the cache entry. */
4571 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4572 MonoInst *cache_ins;
4574 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4579 /* klass - it's the second element of the cache entry*/
4580 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4583 args [2] = cache_ins;
4585 return emit_castclass_with_cache (cfg, klass, args);
4588 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: a null object trivially passes, so branch around. */
4591 NEW_BBLOCK (cfg, is_null_bb);
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4596 save_cast_details (cfg, klass, obj_reg, FALSE);
4598 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4600 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4602 int klass_reg = alloc_preg (cfg);
4604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow an exact klass/vtable compare. */
4606 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4607 /* the remoting code is broken, access the class for now */
4608 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4609 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4611 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4612 cfg->exception_ptr = klass;
4615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4620 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4623 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4627 MONO_START_BB (cfg, is_null_bb);
4629 reset_cast_details (cfg);
4638 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit IR implementing the CIL 'isinst' test of SRC against KLASS.
 * The result register holds SRC when the object is an instance of KLASS
 * (or when SRC is null), and 0 otherwise.
 * NOTE(review): some original lines are elided in this view; comments
 * describe only the visible code.
 */
4641 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4644 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4645 int obj_reg = src->dreg;
4646 int vtable_reg = alloc_preg (cfg);
4647 int res_reg = alloc_ireg_ref (cfg);
4648 MonoInst *klass_inst = NULL;
/* Hard cases (variant generic interfaces, complex isinst): call the
 * cached managed helper produced by mono_marshal_get_isinst_with_cache (). */
4653 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4654 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4655 MonoInst *cache_ins;
4657 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4662 /* klass - it's the second element of the cache entry*/
4663 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4666 args [2] = cache_ins;
4668 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/* Shared-code path: materialize the runtime MonoClass* through the rgctx. */
4671 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4674 NEW_BBLOCK (cfg, is_null_bb);
4675 NEW_BBLOCK (cfg, false_bb);
4676 NEW_BBLOCK (cfg, end_bb);
4678 /* Do the assignment at the beginning, so the other assignment can be if converted */
4679 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4680 ins->type = STACK_OBJ;
/* null objects take the "success" path with res == obj (i.e. null). */
4683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4688 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4689 g_assert (!context_used);
4690 /* the is_null_bb target simply copies the input register to the output */
4691 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4693 int klass_reg = alloc_preg (cfg);
/* Array case (elided branch header): check rank, then the element class. */
4696 int rank_reg = alloc_preg (cfg);
4697 int eclass_reg = alloc_preg (cfg);
4699 g_assert (!context_used);
4700 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with non-trivial array covariance rules
 * (object, ValueType/Enum base, Enum itself, interface elements). */
4705 if (klass->cast_class == mono_defaults.object_class) {
4706 int parent_reg = alloc_preg (cfg);
4707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4708 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4709 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4711 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4712 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4713 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4715 } else if (klass->cast_class == mono_defaults.enum_class) {
4716 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4718 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4719 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4721 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4722 /* Check that the object is a vector too */
4723 int bounds_reg = alloc_preg (cfg);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4729 /* the is_null_bb target simply copies the input register to the output */
4730 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4732 } else if (mono_class_is_nullable (klass)) {
4733 g_assert (!context_used);
4734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4735 /* the is_null_bb target simply copies the input register to the output */
4736 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-shared class: a single vtable (or class) pointer compare suffices. */
4738 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4739 g_assert (!context_used);
4740 /* the remoting code is broken, access the class for now */
4741 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4742 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4744 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4745 cfg->exception_ptr = klass;
4748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General class case: walk the hierarchy via the helper. */
4756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4757 /* the is_null_bb target simply copies the input register to the output */
4758 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is 0. */
4763 MONO_START_BB (cfg, false_bb);
4765 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4768 MONO_START_BB (cfg, is_null_bb);
4770 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the remoting-aware isinst check (used by the CISINST opcode).
 * Produces an integer result (0/1/2) as documented in the comment below.
 * NOTE(review): some original lines are elided in this view.
 */
4776 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4778 /* This opcode takes as input an object reference and a class, and returns:
4779 0) if the object is an instance of the class,
4780 1) if the object is not instance of the class,
4781 2) if the object is a proxy whose type cannot be determined */
4784 #ifndef DISABLE_REMOTING
4785 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4787 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4789 int obj_reg = src->dreg;
4790 int dreg = alloc_ireg (cfg);
4792 #ifndef DISABLE_REMOTING
4793 int klass_reg = alloc_preg (cfg);
4796 NEW_BBLOCK (cfg, true_bb);
4797 NEW_BBLOCK (cfg, false_bb);
4798 NEW_BBLOCK (cfg, end_bb);
4799 #ifndef DISABLE_REMOTING
4800 NEW_BBLOCK (cfg, false2_bb);
4801 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
4804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4805 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4807 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4808 #ifndef DISABLE_REMOTING
4809 NEW_BBLOCK (cfg, interface_fail_bb);
4812 tmp_reg = alloc_preg (cfg);
4813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4814 #ifndef DISABLE_REMOTING
/* Interface check; on failure, see if the object is a transparent proxy
 * whose type info cannot be determined locally (result 2). */
4815 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4816 MONO_START_BB (cfg, interface_fail_bb);
4817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4819 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4821 tmp_reg = alloc_preg (cfg);
4822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4824 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4826 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4829 #ifndef DISABLE_REMOTING
/* Non-interface class: if the object is a proxy, test against the remote
 * (proxy) class; proxies with custom type info yield result 2. */
4830 tmp_reg = alloc_preg (cfg);
4831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4834 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4835 tmp_reg = alloc_preg (cfg);
4836 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4837 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4839 tmp_reg = alloc_preg (cfg);
4840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4842 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4844 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4845 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4847 MONO_START_BB (cfg, no_proxy_bb);
4849 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4851 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the 0/1/2 result and merge at end_bb. */
4855 MONO_START_BB (cfg, false_bb);
4857 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4860 #ifndef DISABLE_REMOTING
4861 MONO_START_BB (cfg, false2_bb);
4863 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4864 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4867 MONO_START_BB (cfg, true_bb);
4869 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4871 MONO_START_BB (cfg, end_bb);
/* Placeholder ICONST carrying the result vreg/type for the caller. */
4874 MONO_INST_NEW (cfg, ins, OP_ICONST);
4876 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the remoting-aware castclass (CCASTCLASS opcode).
 * Result is 0 (instance) or 1 (undeterminable proxy); any other case
 * throws InvalidCastException, as documented in the comment below.
 * NOTE(review): some original lines are elided in this view.
 */
4882 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4884 /* This opcode takes as input an object reference and a class, and returns:
4885 0) if the object is an instance of the class,
4886 1) if the object is a proxy whose type cannot be determined
4887 an InvalidCastException exception is thrown otherwhise*/
4890 #ifndef DISABLE_REMOTING
4891 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4893 MonoBasicBlock *ok_result_bb;
4895 int obj_reg = src->dreg;
4896 int dreg = alloc_ireg (cfg);
4897 int tmp_reg = alloc_preg (cfg);
4899 #ifndef DISABLE_REMOTING
4900 int klass_reg = alloc_preg (cfg);
4901 NEW_BBLOCK (cfg, end_bb);
4904 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts always succeed (result 0). */
4906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details for a precise InvalidCastException message. */
4909 save_cast_details (cfg, klass, obj_reg, FALSE);
4911 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4912 #ifndef DISABLE_REMOTING
4913 NEW_BBLOCK (cfg, interface_fail_bb);
4915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check; a failing non-proxy throws, a proxy without custom
 * type info also throws, otherwise result 1 (undeterminable proxy). */
4916 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4917 MONO_START_BB (cfg, interface_fail_bb);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4920 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4922 tmp_reg = alloc_preg (cfg);
4923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4925 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4927 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4928 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: plain throwing interface cast. */
4930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4931 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4935 #ifndef DISABLE_REMOTING
/* Non-interface class: detect transparent proxies and test against the
 * remote (proxy) class; otherwise fall through to a normal castclass. */
4936 NEW_BBLOCK (cfg, no_proxy_bb);
4938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4940 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4942 tmp_reg = alloc_preg (cfg);
4943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4946 tmp_reg = alloc_preg (cfg);
4947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4951 NEW_BBLOCK (cfg, fail_1_bb);
4953 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4955 MONO_START_BB (cfg, fail_1_bb);
4957 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4958 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4960 MONO_START_BB (cfg, no_proxy_bb);
4962 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4964 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4968 MONO_START_BB (cfg, ok_result_bb);
4970 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4972 #ifndef DISABLE_REMOTING
4973 MONO_START_BB (cfg, end_bb);
/* Placeholder ICONST carrying the result vreg/type for the caller. */
4977 MONO_INST_NEW (cfg, ins, OP_ICONST);
4979 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 * Emit IR for an intrinsic Enum.HasFlag: computes
 * (*enum_this & enum_flag) == enum_flag and yields an int (0/1) result.
 * NOTE(review): the switch body is partly elided here; 'is_i4' is
 * presumably set there based on the underlying enum type — confirm.
 */
4984 static G_GNUC_UNUSED MonoInst*
4985 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4987 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4988 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4991 switch (enum_type->type) {
4994 #if SIZEOF_REGISTER == 8
/* 32-bit enums use I-regs/ops, 64-bit ones use L-regs/ops. */
5006 MonoInst *load, *and_, *cmp, *ceq;
5007 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5008 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5009 int dest_reg = alloc_ireg (cfg);
5011 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
5012 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
5013 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
5014 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
5016 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
5019 load = mono_decompose_opcode (cfg, load);
5020 and_ = mono_decompose_opcode (cfg, and_);
5021 cmp = mono_decompose_opcode (cfg, cmp);
5022 ceq = mono_decompose_opcode (cfg, ceq);
5030 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Emit IR that allocates a delegate of KLASS bound to METHOD with the
 * given TARGET, inlining the work normally done by mono_delegate_ctor ():
 * it fills in the target, method, method_code and invoke_impl fields.
 * VIRTUAL_ selects the virtual-delegate trampoline/init path.
 * NOTE(review): some original lines are elided in this view.
 */
5032 static G_GNUC_UNUSED MonoInst*
5033 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5037 gpointer trampoline;
5038 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a working virtual-invoke impl unless on llvm-only. */
5042 if (virtual_ && !cfg->llvm_only) {
5043 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5046 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5050 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5054 /* Inline the contents of mono_delegate_ctor */
5056 /* Set target field */
5057 /* Optimize away setting of NULL target */
5058 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5059 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into the heap needs a write barrier. */
5060 if (cfg->gen_write_barriers) {
5061 dreg = alloc_preg (cfg);
5062 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5063 emit_write_barrier (cfg, ptr, target);
5067 /* Set method field */
5068 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5072 * To avoid looking up the compiled code belonging to the target method
5073 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5074 * store it, and we fill it after the method has been compiled.
5076 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5077 MonoInst *code_slot_ins;
5080 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
5082 domain = mono_domain_get ();
5083 mono_domain_lock (domain);
5084 if (!domain_jit_info (domain)->method_code_hash)
5085 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5086 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5088 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5089 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5091 mono_domain_unlock (domain);
5093 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5095 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate through icalls instead of trampolines. */
5098 if (cfg->llvm_only) {
5099 MonoInst *args [16];
5104 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5105 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5108 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: reference the delegate trampoline via patch info; JIT: create it now. */
5114 if (cfg->compile_aot) {
5115 MonoDelegateClassMethodPair *del_tramp;
5117 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5118 del_tramp->klass = klass;
5119 del_tramp->method = context_used ? NULL : method;
5120 del_tramp->is_virtual = virtual_;
5121 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5124 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5126 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5127 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5130 /* Set invoke_impl field */
5132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5134 dreg = alloc_preg (cfg);
5135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5136 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5138 dreg = alloc_preg (cfg);
5139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
5143 dreg = alloc_preg (cfg);
5144 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5147 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call creating a multi-dimensional array of the given RANK with
 * the dimension arguments in SP, via the vararg mono_array_new_va icall.
 * LLVM cannot compile vararg calls, so LLVM is disabled for this method.
 */
5153 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5155 MonoJitICallInfo *info;
5157 /* Need to register the icall so it gets an icall wrapper */
5158 info = mono_get_array_new_va_icall (rank);
5160 cfg->flags |= MONO_CFG_HAS_VARARGS;
5162 /* mono_array_new_va () needs a vararg calling convention */
5163 cfg->exception_message = g_strdup ("array-new");
5164 cfg->disable_llvm = TRUE;
5166 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5167 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5171 * handle_constrained_gsharedvt_call:
5173 * Handle constrained calls where the receiver is a gsharedvt type.
5174 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * handle_constrained_gsharedvt_call:
 * See the comment above: handles constrained calls whose receiver is a
 * gsharedvt type by packing the arguments and dispatching through the
 * mono_gsharedvt_constrained_call icall, then unboxing the result.
 * NOTE(review): some original lines are elided in this view.
 */
5177 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5178 gboolean *ref_emit_widen)
5180 MonoInst *ins = NULL;
5181 gboolean emit_widen = *ref_emit_widen;
5184 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5185 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5186 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of target methods/signatures is supported; others
 * fall through to GSHAREDVT_FAILURE below. */
5188 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5189 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5190 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5191 MonoInst *args [16];
5194 * This case handles calls to
5195 * - object:ToString()/Equals()/GetHashCode(),
5196 * - System.IComparable<T>:CompareTo()
5197 * - System.IEquatable<T>:Equals ()
5198 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (through rgctx when generic-shared). */
5202 if (mono_method_check_context_used (cmethod))
5203 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5205 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5206 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5208 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5209 if (fsig->hasthis && fsig->param_count) {
5210 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5211 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5212 ins->dreg = alloc_preg (cfg);
5213 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5214 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: tell the icall whether the arg must be dereferenced. */
5217 if (mini_is_gsharedvt_type (fsig->params [0])) {
5218 int addr_reg, deref_arg_reg;
5220 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5221 deref_arg_reg = alloc_preg (cfg);
5222 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5223 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5225 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5226 addr_reg = ins->dreg;
5227 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5229 EMIT_NEW_ICONST (cfg, args [3], 0);
5230 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No (this) arguments: pass 0 for deref flag and args array. */
5233 EMIT_NEW_ICONST (cfg, args [3], 0);
5234 EMIT_NEW_ICONST (cfg, args [4], 0);
5236 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it according to the return type. */
5239 if (mini_is_gsharedvt_type (fsig->ret)) {
5240 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5241 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Load the value from just past the MonoObject header. */
5245 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5246 MONO_ADD_INS (cfg->cbb, add);
5248 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5249 MONO_ADD_INS (cfg->cbb, ins);
5250 /* ins represents the call result */
5253 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5256 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 * Materialize the GOT address into cfg->got_var at the start of the
 * entry bblock, once per method, and keep the variable alive with a
 * dummy use in the exit bblock. No-op when there is no got_var or it
 * was already allocated.
 */
5265 mono_emit_load_got_addr (MonoCompile *cfg)
5267 MonoInst *getaddr, *dummy_use;
5269 if (!cfg->got_var || cfg->got_var_allocated)
5272 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5273 getaddr->cil_code = cfg->header->code;
5274 getaddr->dreg = cfg->got_var->dreg;
5276 /* Add it to the start of the first bblock */
5277 if (cfg->bb_entry->code) {
5278 getaddr->next = cfg->bb_entry->code;
5279 cfg->bb_entry->code = getaddr;
5282 MONO_ADD_INS (cfg->bb_entry, getaddr);
5284 cfg->got_var_allocated = TRUE;
5287 * Add a dummy use to keep the got_var alive, since real uses might
5288 * only be generated by the back ends.
5289 * Add it to end_bblock, so the variable's lifetime covers the whole
5291 * It would be better to make the usage of the got var explicit in all
5292 * cases when the backend needs it (i.e. calls, throw etc.), so this
5293 * wouldn't be needed.
5295 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5296 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline code-size limit, read once from the MONO_INLINELIMIT environment
 * variable (default INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
5299 static int inline_limit;
5300 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled:
 * checks compile flags, inline depth, method attributes, the code-size
 * limit, class-initialization constraints, soft-float restrictions and
 * the explicit dont_inline list.
 * NOTE(review): some original lines (including returns) are elided here.
 */
5303 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5305 MonoMethodHeaderSummary header;
5307 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5308 MonoMethodSignature *sig = mono_method_signature (method);
5312 if (cfg->disable_inline)
/* Cap recursion: refuse deeply nested inlining. */
5317 if (cfg->inline_depth > 10)
5320 if (!mono_method_get_header_summary (method, &header))
5323 /*runtime, icall and pinvoke are checked by summary call*/
5324 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5325 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5326 (mono_class_is_marshalbyref (method->klass)) ||
5330 /* also consider num_locals? */
5331 /* Do the size check early to avoid creating vtables */
5332 if (!inline_limit_inited) {
5333 if (g_getenv ("MONO_INLINELIMIT"))
5334 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5336 inline_limit = INLINE_LENGTH_LIMIT;
5337 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5339 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5343 * if we can initialize the class of the method right away, we do,
5344 * otherwise we don't allow inlining if the class needs initialization,
5345 * since it would mean inserting a call to mono_runtime_class_init()
5346 * inside the inlined code
5348 if (!(cfg->opt & MONO_OPT_SHARED)) {
5349 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5350 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5351 vtable = mono_class_vtable (cfg->domain, method->klass);
5354 if (!cfg->compile_aot) {
5356 if (!mono_runtime_class_init_full (vtable, &error)) {
5357 mono_error_cleanup (&error);
5361 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5362 if (cfg->run_cctors && method->klass->has_cctor) {
5363 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5364 if (!method->klass->runtime_info)
5365 /* No vtable created yet */
5367 vtable = mono_class_vtable (cfg->domain, method->klass);
5370 /* This makes so that inline cannot trigger */
5371 /* .cctors: too many apps depend on them */
5372 /* running with a specific order... */
5373 if (! vtable->initialized)
5376 if (!mono_runtime_class_init_full (vtable, &error)) {
5377 mono_error_cleanup (&error);
5381 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5382 if (!method->klass->runtime_info)
5383 /* No vtable created yet */
5385 vtable = mono_class_vtable (cfg->domain, method->klass);
5388 if (!vtable->initialized)
5393 * If we're compiling for shared code
5394 * the cctor will need to be run at aot method load time, for example,
5395 * or at the end of the compilation of the inlining method.
5397 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5401 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5402 if (mono_arch_is_soft_float ()) {
5404 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5406 for (i = 0; i < sig->param_count; ++i)
5407 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5412 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static field access in METHOD on KLASS requires
 * emitting a class-initialization (cctor) call first.
 * NOTE(review): the return statements are elided in this view; each
 * visible condition presumably guards an early return — confirm against
 * the full source.
 */
5419 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5421 if (!cfg->compile_aot) {
5423 if (vtable->initialized)
5427 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5428 if (cfg->method == method)
5432 if (!mono_class_needs_cctor_run (klass, method))
5435 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5436 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS; emits a bounds check when BCHECK.
 * Uses an x86/amd64 LEA fast path for power-of-two element sizes, and an
 * rgctx lookup for the element size on gsharedvt variable-size classes.
 * NOTE(review): some original lines are elided in this view.
 */
5443 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5447 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5450 if (mini_is_gsharedvt_variable_klass (klass)) {
5453 mono_class_init (klass);
5454 size = mono_class_array_element_size (klass);
5457 mult_reg = alloc_preg (cfg);
5458 array_reg = arr->dreg;
5459 index_reg = index->dreg;
5461 #if SIZEOF_REGISTER == 8
5462 /* The array reg is 64 bits but the index reg is only 32 */
5463 if (COMPILE_LLVM (cfg)) {
5465 index2_reg = index_reg;
5467 index2_reg = alloc_preg (cfg);
5468 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: truncate an I8 index to I4. */
5471 if (index->type == STACK_I8) {
5472 index2_reg = alloc_preg (cfg);
5473 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5475 index2_reg = index_reg;
5480 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5482 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: a single LEA covers base + index*scale + vector offset. */
5483 if (size == 1 || size == 2 || size == 4 || size == 8) {
5484 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5486 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5487 ins->klass = mono_class_get_element_class (klass);
5488 ins->type = STACK_MP;
5494 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size is only known at runtime, fetch it via rgctx. */
5497 MonoInst *rgctx_ins;
5500 g_assert (cfg->gshared);
5501 context_used = mini_class_check_context_used (cfg, klass);
5502 g_assert (context_used);
5503 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5504 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5508 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5509 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5510 ins->klass = mono_class_get_element_class (klass);
5511 ins->type = STACK_MP;
5512 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [i,j] of a two-dimensional
 * array of KLASS, honoring per-dimension lower bounds from the bounds
 * array and range-checking both indices.
 * NOTE(review): some original lines are elided in this view.
 */
5518 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5520 int bounds_reg = alloc_preg (cfg);
5521 int add_reg = alloc_ireg_mp (cfg);
5522 int mult_reg = alloc_preg (cfg);
5523 int mult2_reg = alloc_preg (cfg);
5524 int low1_reg = alloc_preg (cfg);
5525 int low2_reg = alloc_preg (cfg);
5526 int high1_reg = alloc_preg (cfg);
5527 int high2_reg = alloc_preg (cfg);
5528 int realidx1_reg = alloc_preg (cfg);
5529 int realidx2_reg = alloc_preg (cfg);
5530 int sum_reg = alloc_preg (cfg);
5531 int index1, index2, tmpreg;
5535 mono_class_init (klass);
5536 size = mono_class_array_element_size (klass);
5538 index1 = index_ins1->dreg;
5539 index2 = index_ins2->dreg;
5541 #if SIZEOF_REGISTER == 8
5542 /* The array reg is 64 bits but the index reg is only 32 */
5543 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indices before 64-bit address arithmetic. */
5546 tmpreg = alloc_preg (cfg);
5547 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5549 tmpreg = alloc_preg (cfg);
5550 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5554 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5558 /* range checking */
5559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5560 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; throw if >= length
 * (unsigned compare also catches realidx < 0). */
5562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5563 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5564 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5565 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5566 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5567 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5568 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry. */
5570 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5571 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5572 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5574 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5575 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5576 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector). */
5578 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5579 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5581 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5582 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5584 ins->type = STACK_MP;
5586 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for an Address/Get/Set call on a
 * multi-dimensional array. Fast paths: rank 1 uses ldelema_1, rank 2 uses
 * ldelema_2 (unless the backend emulates mul/div or the element class is a
 * gsharedvt variable type); otherwise falls back to a call to the
 * marshal-generated Address() wrapper.
 * NOTE(review): some lines of this function are elided in this extract.
 */
5592 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5596 	MonoMethod *addr_method;
5598 	MonoClass *eclass = cmethod->klass->element_class;
/* For a setter, the trailing parameter is the value, not an index. */
5600 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5603 		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5605 	/* emit_ldelema_2 depends on OP_LMUL */
5606 	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5607 		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5610 	if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the array Address() helper for this rank/size. */
5613 	element_size = mono_class_array_element_size (eclass);
5614 	addr_method = mono_marshal_get_array_address (rank, element_size);
5615 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5620 static MonoBreakPolicy
5621 always_insert_breakpoint (MonoMethod *method)
5623 	return MONO_BREAK_POLICY_ALWAYS;
/* Current embedder-settable break policy; consulted by should_insert_brekpoint (). */
5626 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5629 * mono_set_break_policy:
5630 * policy_callback: the new callback function
5632  * Allow embedders to decide whether to actually obey breakpoint instructions
5633 * (both break IL instructions and Debugger.Break () method calls), for example
5634 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5635 * untrusted or semi-trusted code.
5637 * @policy_callback will be called every time a break point instruction needs to
5638 * be inserted with the method argument being the method that calls Debugger.Break()
5639 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5640 * if it wants the breakpoint to not be effective in the given method.
5641 * #MONO_BREAK_POLICY_ALWAYS is the default.
5644 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
5646 	if (policy_callback)
5647 		break_policy_func = policy_callback;
5649 		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical misspelling kept for ABI/callers)
 *
 *   Consult the installed break policy for METHOD and decide whether the JIT
 * should materialize a breakpoint. MONO_BREAK_POLICY_ON_DBG is no longer
 * supported and only emits a warning; an out-of-range return from the
 * callback is also warned about.
 * NOTE(review): the return statements for each case are elided in this
 * extract.
 */
5653 should_insert_brekpoint (MonoMethod *method) {
5654 	switch (break_policy_func (method)) {
5655 	case MONO_BREAK_POLICY_ALWAYS:
5657 	case MONO_BREAK_POLICY_NEVER:
5659 	case MONO_BREAK_POLICY_ON_DBG:
5660 		g_warning ("mdb no longer supported");
5663 		g_warning ("Incorrect value returned from break policy callback");
5668 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline version of Array.Get/SetGenericValueImpl: compute the element
 * address (no bounds check — callers already did it), then copy the value
 * between the element slot and the byref argument args [2]. For the store
 * direction a write barrier is emitted when the element type is a reference.
 */
5670 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5672 	MonoInst *addr, *store, *load;
5673 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5675 	/* the bounds check is already done by the callers */
5676 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store direction: *element = *args[2]. */
5678 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5679 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5680 		if (mini_type_is_reference (fsig->params [2]))
5681 			emit_write_barrier (cfg, addr, load);
/* Load direction: *args[2] = *element. */
5683 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5684 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type (handles generic sharing via mini_type_is_reference). */
5691 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5693 	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into array sp [0] at index sp [1].
 * Reference-typed stores with safety checks go through the virtual
 * stelemref helper (which performs the array covariance check); storing a
 * NULL constant skips that. Value types use a direct membase store, with a
 * constant-folded offset when the index is an OP_ICONST, and a gsharedvt
 * path using OP_STOREV_MEMBASE for variable-size element types.
 * NOTE(review): some lines (declarations, casts, braces) are elided in this
 * extract.
 */
5697 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5699 	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5700 		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5701 		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5702 		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5703 		MonoInst *iargs [3];
/* The helper is dispatched virtually, so the vtable must be set up. */
5706 		mono_class_setup_vtable (obj_array);
5707 		g_assert (helper->slot);
5709 		if (sp [0]->type != STACK_OBJ)
5711 		if (sp [2]->type != STACK_OBJ)
5718 		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5722 		if (mini_is_gsharedvt_variable_klass (klass)) {
5725 			// FIXME-VT: OP_ICONST optimization
5726 			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5727 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5728 			ins->opcode = OP_STOREV_MEMBASE;
5729 		} else if (sp [1]->opcode == OP_ICONST) {
5730 			int array_reg = sp [0]->dreg;
5731 			int index_reg = sp [1]->dreg;
/* Fold the constant index straight into the store offset. */
5732 			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5734 			if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5735 				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5738 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5739 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5741 			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5742 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5743 			if (generic_class_is_reference_type (cfg, klass))
5744 				emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: element access with the
 * bounds/covariance checks disabled. The element class comes from the third
 * parameter (store) or the return type (load).
 * NOTE(review): branch structure is partially elided in this extract.
 */
5751 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5756 		eklass = mono_class_from_mono_type (fsig->params [2]);
5758 		eklass = mono_class_from_mono_type (fsig->ret);
/* safety_checks == FALSE: no bounds check, no stelemref helper. */
5761 		return emit_array_store (cfg, eklass, args, FALSE);
5763 		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5764 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may reinterpret a value
 * of PARAM_KLASS as RETURN_KLASS without conversion code. Rejects mixing
 * references with valuetypes, reference/reference pairs, types with GC
 * references, struct/scalar mixes and floats; accepts equal-size valuetypes
 * and size-mismatched scalars that share the 32-bit register class.
 *
 * FIX(review): the source had `¶m_klass` (U+00B6 pilcrow + `m_klass`) where
 * `&param_klass` was intended — an encoding corruption of the `&param` token
 * that made the code invalid C. Restored to `&param_klass` in four places.
 * NOTE(review): other lines (returns, braces) appear elided in this extract.
 */
5770 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5773 	int param_size, return_size;
/* Strip enum/generic-sharing wrappers so the checks below see the real types. */
5775 	param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5776 	return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5778 	if (cfg->verbose_level > 3)
5779 		printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5781 	//Don't allow mixing reference types with value types
5782 	if (param_klass->valuetype != return_klass->valuetype) {
5783 		if (cfg->verbose_level > 3)
5784 			printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5788 	if (!param_klass->valuetype) {
5789 		if (cfg->verbose_level > 3)
5790 			printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types holding GC references can never be blindly reinterpreted. */
5795 	if (param_klass->has_references || return_klass->has_references)
5798 	/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5799 	if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5800 		(!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5801 		if (cfg->verbose_level > 3)
5802 			printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5806 	if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5807 		return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5808 		if (cfg->verbose_level > 3)
5809 			printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5813 	param_size = mono_class_value_size (param_klass, &align);
5814 	return_size = mono_class_value_size (return_klass, &align);
5816 	//We can do it if sizes match
5817 	if (param_size == return_size) {
5818 		if (cfg->verbose_level > 3)
5819 			printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5823 	//No simple way to handle struct if sizes don't match
5824 	if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5825 		if (cfg->verbose_level > 3)
5826 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5831 	 * Same reg size category.
5832 	 * A quick note on why we don't require widening here.
5833 	 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5835 	 * Since the source value comes from a function argument, the JIT will already have
5836 	 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5838 	if (param_size <= 4 && return_size <= 4) {
5839 		if (cfg->verbose_level > 3)
5840 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Intrinsic for Array.UnsafeMov<S,R>: when the parameter and return types
 * (or, for rank-1 arrays, their element types) are bit-compatible per
 * is_unsafe_mov_compatible, the move degenerates to a plain register copy.
 * gsharedvt-variable return types are never intrinsified.
 * NOTE(review): the emitted-move lines appear elided in this extract.
 */
5848 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5850 	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5851 	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5853 	if (mini_is_gsharedvt_variable_type (fsig->ret))
5856 	//Valuetypes that are semantically equivalent or numbers than can be widened to
5857 	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5860 	//Arrays of valuetypes that are semantically equivalent
5861 	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace the constructor call CMETHOD with intrinsic IR: SIMD
 * intrinsics first (when MONO_OPT_SIMD is on and the arch supports them),
 * then native-types intrinsics. Returns NULL when no intrinsic applies
 * (caller emits a normal call).
 */
5868 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5870 #ifdef MONO_ARCH_SIMD_INTRINSICS
5871 	MonoInst *ins = NULL;
5873 	if (cfg->opt & MONO_OPT_SIMD) {
5874 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5880 	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given MONO_MEMORY_BARRIER_* kind to
 * the current basic block and return it.
 */
5884 emit_memory_barrier (MonoCompile *cfg, int kind)
5886 	MonoInst *ins = NULL;
5887 	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5888 	MONO_ADD_INS (cfg->cbb, ins);
5889 	ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics available only on the LLVM backend: Math.Sin/Cos/Sqrt and
 * double Abs become single-instruction float ops; with MONO_OPT_CMOV,
 * Math.Min/Max on (u)int32/(u)int64 become OP_{I,L}{MIN,MAX}[_UN].
 * Returns the emitted instruction, or NULL when nothing matched.
 * NOTE(review): the opcode assignments for several branches (e.g. Sin/Cos)
 * are elided in this extract.
 */
5895 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5897 	MonoInst *ins = NULL;
5900 	/* The LLVM backend supports these intrinsics */
5901 	if (cmethod->klass == mono_defaults.math_class) {
5902 		if (strcmp (cmethod->name, "Sin") == 0) {
5904 		} else if (strcmp (cmethod->name, "Cos") == 0) {
5906 		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
5908 		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one freg in, one freg out. */
5912 		if (opcode && fsig->param_count == 1) {
5913 			MONO_INST_NEW (cfg, ins, opcode);
5914 			ins->type = STACK_R8;
5915 			ins->dreg = mono_alloc_freg (cfg);
5916 			ins->sreg1 = args [0]->dreg;
5917 			MONO_ADD_INS (cfg->cbb, ins);
5921 		if (cfg->opt & MONO_OPT_CMOV) {
5922 			if (strcmp (cmethod->name, "Min") == 0) {
5923 				if (fsig->params [0]->type == MONO_TYPE_I4)
5925 				if (fsig->params [0]->type == MONO_TYPE_U4)
5926 					opcode = OP_IMIN_UN;
5927 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5929 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5930 					opcode = OP_LMIN_UN;
5931 			} else if (strcmp (cmethod->name, "Max") == 0) {
5932 				if (fsig->params [0]->type == MONO_TYPE_I4)
5934 				if (fsig->params [0]->type == MONO_TYPE_U4)
5935 					opcode = OP_IMAX_UN;
5936 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5938 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5939 					opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: stack type follows the operand width. */
5943 		if (opcode && fsig->param_count == 2) {
5944 			MONO_INST_NEW (cfg, ins, opcode);
5945 			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5946 			ins->dreg = mono_alloc_ireg (cfg);
5947 			ins->sreg1 = args [0]->dreg;
5948 			ins->sreg2 = args [1]->dreg;
5949 			MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore/
 * UnsafeLoad/UnsafeMov helpers. Returns the emitted instruction or NULL.
 */
5957 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5959 	if (cmethod->klass == mono_defaults.array_class) {
5960 		if (strcmp (cmethod->name, "UnsafeStore") == 0)
5961 			return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5962 		else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5963 			return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5964 		else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5965 			return emit_array_unsafe_mov (cfg, fsig, args);
5972 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5974 MonoInst *ins = NULL;
5976 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5978 if (cmethod->klass == mono_defaults.string_class) {
5979 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5980 int dreg = alloc_ireg (cfg);
5981 int index_reg = alloc_preg (cfg);
5982 int add_reg = alloc_preg (cfg);
5984 #if SIZEOF_REGISTER == 8
5985 if (COMPILE_LLVM (cfg)) {
5986 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5988 /* The array reg is 64 bits but the index reg is only 32 */
5989 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5992 index_reg = args [1]->dreg;
5994 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5996 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5997 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5998 add_reg = ins->dreg;
5999 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6002 int mult_reg = alloc_preg (cfg);
6003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
6004 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
6005 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6006 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
6008 type_from_op (cfg, ins, NULL, NULL);
6010 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6011 int dreg = alloc_ireg (cfg);
6012 /* Decompose later to allow more optimizations */
6013 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
6014 ins->type = STACK_I4;
6015 ins->flags |= MONO_INST_FAULT;
6016 cfg->cbb->has_array_access = TRUE;
6017 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
6022 } else if (cmethod->klass == mono_defaults.object_class) {
6023 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
6024 int dreg = alloc_ireg_ref (cfg);
6025 int vt_reg = alloc_preg (cfg);
6026 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6027 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6028 type_from_op (cfg, ins, NULL, NULL);
6031 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6032 int dreg = alloc_ireg (cfg);
6033 int t1 = alloc_ireg (cfg);
6035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6036 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6037 ins->type = STACK_I4;
6040 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6041 MONO_INST_NEW (cfg, ins, OP_NOP);
6042 MONO_ADD_INS (cfg->cbb, ins);
6046 } else if (cmethod->klass == mono_defaults.array_class) {
6047 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6048 return emit_array_generic_access (cfg, fsig, args, FALSE);
6049 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6050 return emit_array_generic_access (cfg, fsig, args, TRUE);
6052 #ifndef MONO_BIG_ARRAYS
6054 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6057 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6058 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6059 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6060 int dreg = alloc_ireg (cfg);
6061 int bounds_reg = alloc_ireg_mp (cfg);
6062 MonoBasicBlock *end_bb, *szarray_bb;
6063 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6065 NEW_BBLOCK (cfg, end_bb);
6066 NEW_BBLOCK (cfg, szarray_bb);
6068 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6069 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6072 /* Non-szarray case */
6074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6075 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6077 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6078 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6080 MONO_START_BB (cfg, szarray_bb);
6083 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6084 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6086 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6087 MONO_START_BB (cfg, end_bb);
6089 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6090 ins->type = STACK_I4;
6096 if (cmethod->name [0] != 'g')
6099 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6100 int dreg = alloc_ireg (cfg);
6101 int vtable_reg = alloc_preg (cfg);
6102 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6103 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6104 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6105 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6106 type_from_op (cfg, ins, NULL, NULL);
6109 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6110 int dreg = alloc_ireg (cfg);
6112 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6113 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6114 type_from_op (cfg, ins, NULL, NULL);
6119 } else if (cmethod->klass == runtime_helpers_class) {
6120 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6121 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6125 } else if (cmethod->klass == mono_defaults.monitor_class) {
6126 gboolean is_enter = FALSE;
6128 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6133 * To make async stack traces work, icalls which can block should have a wrapper.
6134 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6136 MonoBasicBlock *end_bb;
6138 NEW_BBLOCK (cfg, end_bb);
6140 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter_fast, args);
6141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6143 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter, args);
6144 MONO_START_BB (cfg, end_bb);
6147 } else if (cmethod->klass == mono_defaults.thread_class) {
6148 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6149 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6150 MONO_ADD_INS (cfg->cbb, ins);
6152 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6153 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6154 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6156 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6158 if (fsig->params [0]->type == MONO_TYPE_I1)
6159 opcode = OP_LOADI1_MEMBASE;
6160 else if (fsig->params [0]->type == MONO_TYPE_U1)
6161 opcode = OP_LOADU1_MEMBASE;
6162 else if (fsig->params [0]->type == MONO_TYPE_I2)
6163 opcode = OP_LOADI2_MEMBASE;
6164 else if (fsig->params [0]->type == MONO_TYPE_U2)
6165 opcode = OP_LOADU2_MEMBASE;
6166 else if (fsig->params [0]->type == MONO_TYPE_I4)
6167 opcode = OP_LOADI4_MEMBASE;
6168 else if (fsig->params [0]->type == MONO_TYPE_U4)
6169 opcode = OP_LOADU4_MEMBASE;
6170 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6171 opcode = OP_LOADI8_MEMBASE;
6172 else if (fsig->params [0]->type == MONO_TYPE_R4)
6173 opcode = OP_LOADR4_MEMBASE;
6174 else if (fsig->params [0]->type == MONO_TYPE_R8)
6175 opcode = OP_LOADR8_MEMBASE;
6176 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6177 opcode = OP_LOAD_MEMBASE;
6180 MONO_INST_NEW (cfg, ins, opcode);
6181 ins->inst_basereg = args [0]->dreg;
6182 ins->inst_offset = 0;
6183 MONO_ADD_INS (cfg->cbb, ins);
6185 switch (fsig->params [0]->type) {
6192 ins->dreg = mono_alloc_ireg (cfg);
6193 ins->type = STACK_I4;
6197 ins->dreg = mono_alloc_lreg (cfg);
6198 ins->type = STACK_I8;
6202 ins->dreg = mono_alloc_ireg (cfg);
6203 #if SIZEOF_REGISTER == 8
6204 ins->type = STACK_I8;
6206 ins->type = STACK_I4;
6211 ins->dreg = mono_alloc_freg (cfg);
6212 ins->type = STACK_R8;
6215 g_assert (mini_type_is_reference (fsig->params [0]));
6216 ins->dreg = mono_alloc_ireg_ref (cfg);
6217 ins->type = STACK_OBJ;
6221 if (opcode == OP_LOADI8_MEMBASE)
6222 ins = mono_decompose_opcode (cfg, ins);
6224 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6228 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6230 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6232 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6233 opcode = OP_STOREI1_MEMBASE_REG;
6234 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6235 opcode = OP_STOREI2_MEMBASE_REG;
6236 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6237 opcode = OP_STOREI4_MEMBASE_REG;
6238 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6239 opcode = OP_STOREI8_MEMBASE_REG;
6240 else if (fsig->params [0]->type == MONO_TYPE_R4)
6241 opcode = OP_STORER4_MEMBASE_REG;
6242 else if (fsig->params [0]->type == MONO_TYPE_R8)
6243 opcode = OP_STORER8_MEMBASE_REG;
6244 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6245 opcode = OP_STORE_MEMBASE_REG;
6248 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6250 MONO_INST_NEW (cfg, ins, opcode);
6251 ins->sreg1 = args [1]->dreg;
6252 ins->inst_destbasereg = args [0]->dreg;
6253 ins->inst_offset = 0;
6254 MONO_ADD_INS (cfg->cbb, ins);
6256 if (opcode == OP_STOREI8_MEMBASE_REG)
6257 ins = mono_decompose_opcode (cfg, ins);
6262 } else if (cmethod->klass->image == mono_defaults.corlib &&
6263 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6264 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6267 #if SIZEOF_REGISTER == 8
6268 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6269 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6270 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6271 ins->dreg = mono_alloc_preg (cfg);
6272 ins->sreg1 = args [0]->dreg;
6273 ins->type = STACK_I8;
6274 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6275 MONO_ADD_INS (cfg->cbb, ins);
6279 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6281 /* 64 bit reads are already atomic */
6282 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6283 load_ins->dreg = mono_alloc_preg (cfg);
6284 load_ins->inst_basereg = args [0]->dreg;
6285 load_ins->inst_offset = 0;
6286 load_ins->type = STACK_I8;
6287 MONO_ADD_INS (cfg->cbb, load_ins);
6289 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6296 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6297 MonoInst *ins_iconst;
6300 if (fsig->params [0]->type == MONO_TYPE_I4) {
6301 opcode = OP_ATOMIC_ADD_I4;
6302 cfg->has_atomic_add_i4 = TRUE;
6304 #if SIZEOF_REGISTER == 8
6305 else if (fsig->params [0]->type == MONO_TYPE_I8)
6306 opcode = OP_ATOMIC_ADD_I8;
6309 if (!mono_arch_opcode_supported (opcode))
6311 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6312 ins_iconst->inst_c0 = 1;
6313 ins_iconst->dreg = mono_alloc_ireg (cfg);
6314 MONO_ADD_INS (cfg->cbb, ins_iconst);
6316 MONO_INST_NEW (cfg, ins, opcode);
6317 ins->dreg = mono_alloc_ireg (cfg);
6318 ins->inst_basereg = args [0]->dreg;
6319 ins->inst_offset = 0;
6320 ins->sreg2 = ins_iconst->dreg;
6321 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6322 MONO_ADD_INS (cfg->cbb, ins);
6324 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6325 MonoInst *ins_iconst;
6328 if (fsig->params [0]->type == MONO_TYPE_I4) {
6329 opcode = OP_ATOMIC_ADD_I4;
6330 cfg->has_atomic_add_i4 = TRUE;
6332 #if SIZEOF_REGISTER == 8
6333 else if (fsig->params [0]->type == MONO_TYPE_I8)
6334 opcode = OP_ATOMIC_ADD_I8;
6337 if (!mono_arch_opcode_supported (opcode))
6339 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6340 ins_iconst->inst_c0 = -1;
6341 ins_iconst->dreg = mono_alloc_ireg (cfg);
6342 MONO_ADD_INS (cfg->cbb, ins_iconst);
6344 MONO_INST_NEW (cfg, ins, opcode);
6345 ins->dreg = mono_alloc_ireg (cfg);
6346 ins->inst_basereg = args [0]->dreg;
6347 ins->inst_offset = 0;
6348 ins->sreg2 = ins_iconst->dreg;
6349 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6350 MONO_ADD_INS (cfg->cbb, ins);
6352 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6355 if (fsig->params [0]->type == MONO_TYPE_I4) {
6356 opcode = OP_ATOMIC_ADD_I4;
6357 cfg->has_atomic_add_i4 = TRUE;
6359 #if SIZEOF_REGISTER == 8
6360 else if (fsig->params [0]->type == MONO_TYPE_I8)
6361 opcode = OP_ATOMIC_ADD_I8;
6364 if (!mono_arch_opcode_supported (opcode))
6366 MONO_INST_NEW (cfg, ins, opcode);
6367 ins->dreg = mono_alloc_ireg (cfg);
6368 ins->inst_basereg = args [0]->dreg;
6369 ins->inst_offset = 0;
6370 ins->sreg2 = args [1]->dreg;
6371 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6372 MONO_ADD_INS (cfg->cbb, ins);
6375 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6376 MonoInst *f2i = NULL, *i2f;
6377 guint32 opcode, f2i_opcode, i2f_opcode;
6378 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6379 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6381 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6382 fsig->params [0]->type == MONO_TYPE_R4) {
6383 opcode = OP_ATOMIC_EXCHANGE_I4;
6384 f2i_opcode = OP_MOVE_F_TO_I4;
6385 i2f_opcode = OP_MOVE_I4_TO_F;
6386 cfg->has_atomic_exchange_i4 = TRUE;
6388 #if SIZEOF_REGISTER == 8
6390 fsig->params [0]->type == MONO_TYPE_I8 ||
6391 fsig->params [0]->type == MONO_TYPE_R8 ||
6392 fsig->params [0]->type == MONO_TYPE_I) {
6393 opcode = OP_ATOMIC_EXCHANGE_I8;
6394 f2i_opcode = OP_MOVE_F_TO_I8;
6395 i2f_opcode = OP_MOVE_I8_TO_F;
6398 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6399 opcode = OP_ATOMIC_EXCHANGE_I4;
6400 cfg->has_atomic_exchange_i4 = TRUE;
6406 if (!mono_arch_opcode_supported (opcode))
6410 /* TODO: Decompose these opcodes instead of bailing here. */
6411 if (COMPILE_SOFT_FLOAT (cfg))
6414 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6415 f2i->dreg = mono_alloc_ireg (cfg);
6416 f2i->sreg1 = args [1]->dreg;
6417 if (f2i_opcode == OP_MOVE_F_TO_I4)
6418 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6419 MONO_ADD_INS (cfg->cbb, f2i);
6422 MONO_INST_NEW (cfg, ins, opcode);
6423 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6424 ins->inst_basereg = args [0]->dreg;
6425 ins->inst_offset = 0;
6426 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6427 MONO_ADD_INS (cfg->cbb, ins);
6429 switch (fsig->params [0]->type) {
6431 ins->type = STACK_I4;
6434 ins->type = STACK_I8;
6437 #if SIZEOF_REGISTER == 8
6438 ins->type = STACK_I8;
6440 ins->type = STACK_I4;
6445 ins->type = STACK_R8;
6448 g_assert (mini_type_is_reference (fsig->params [0]));
6449 ins->type = STACK_OBJ;
6454 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6455 i2f->dreg = mono_alloc_freg (cfg);
6456 i2f->sreg1 = ins->dreg;
6457 i2f->type = STACK_R8;
6458 if (i2f_opcode == OP_MOVE_I4_TO_F)
6459 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6460 MONO_ADD_INS (cfg->cbb, i2f);
6465 if (cfg->gen_write_barriers && is_ref)
6466 emit_write_barrier (cfg, args [0], args [1]);
6468 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6469 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6470 guint32 opcode, f2i_opcode, i2f_opcode;
6471 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6472 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6474 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6475 fsig->params [1]->type == MONO_TYPE_R4) {
6476 opcode = OP_ATOMIC_CAS_I4;
6477 f2i_opcode = OP_MOVE_F_TO_I4;
6478 i2f_opcode = OP_MOVE_I4_TO_F;
6479 cfg->has_atomic_cas_i4 = TRUE;
6481 #if SIZEOF_REGISTER == 8
6483 fsig->params [1]->type == MONO_TYPE_I8 ||
6484 fsig->params [1]->type == MONO_TYPE_R8 ||
6485 fsig->params [1]->type == MONO_TYPE_I) {
6486 opcode = OP_ATOMIC_CAS_I8;
6487 f2i_opcode = OP_MOVE_F_TO_I8;
6488 i2f_opcode = OP_MOVE_I8_TO_F;
6491 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6492 opcode = OP_ATOMIC_CAS_I4;
6493 cfg->has_atomic_cas_i4 = TRUE;
6499 if (!mono_arch_opcode_supported (opcode))
6503 /* TODO: Decompose these opcodes instead of bailing here. */
6504 if (COMPILE_SOFT_FLOAT (cfg))
6507 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6508 f2i_new->dreg = mono_alloc_ireg (cfg);
6509 f2i_new->sreg1 = args [1]->dreg;
6510 if (f2i_opcode == OP_MOVE_F_TO_I4)
6511 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6512 MONO_ADD_INS (cfg->cbb, f2i_new);
6514 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6515 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6516 f2i_cmp->sreg1 = args [2]->dreg;
6517 if (f2i_opcode == OP_MOVE_F_TO_I4)
6518 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6519 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6522 MONO_INST_NEW (cfg, ins, opcode);
6523 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6524 ins->sreg1 = args [0]->dreg;
6525 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6526 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6527 MONO_ADD_INS (cfg->cbb, ins);
6529 switch (fsig->params [1]->type) {
6531 ins->type = STACK_I4;
6534 ins->type = STACK_I8;
6537 #if SIZEOF_REGISTER == 8
6538 ins->type = STACK_I8;
6540 ins->type = STACK_I4;
6544 ins->type = cfg->r4_stack_type;
6547 ins->type = STACK_R8;
6550 g_assert (mini_type_is_reference (fsig->params [1]));
6551 ins->type = STACK_OBJ;
6556 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6557 i2f->dreg = mono_alloc_freg (cfg);
6558 i2f->sreg1 = ins->dreg;
6559 i2f->type = STACK_R8;
6560 if (i2f_opcode == OP_MOVE_I4_TO_F)
6561 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6562 MONO_ADD_INS (cfg->cbb, i2f);
6567 if (cfg->gen_write_barriers && is_ref)
6568 emit_write_barrier (cfg, args [0], args [1]);
6570 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6571 fsig->params [1]->type == MONO_TYPE_I4) {
6572 MonoInst *cmp, *ceq;
6574 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6577 /* int32 r = CAS (location, value, comparand); */
6578 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6579 ins->dreg = alloc_ireg (cfg);
6580 ins->sreg1 = args [0]->dreg;
6581 ins->sreg2 = args [1]->dreg;
6582 ins->sreg3 = args [2]->dreg;
6583 ins->type = STACK_I4;
6584 MONO_ADD_INS (cfg->cbb, ins);
6586 /* bool result = r == comparand; */
6587 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6588 cmp->sreg1 = ins->dreg;
6589 cmp->sreg2 = args [2]->dreg;
6590 cmp->type = STACK_I4;
6591 MONO_ADD_INS (cfg->cbb, cmp);
6593 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6594 ceq->dreg = alloc_ireg (cfg);
6595 ceq->type = STACK_I4;
6596 MONO_ADD_INS (cfg->cbb, ceq);
6598 /* *success = result; */
6599 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6601 cfg->has_atomic_cas_i4 = TRUE;
6603 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6604 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6608 } else if (cmethod->klass->image == mono_defaults.corlib &&
6609 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6610 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6613 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6615 MonoType *t = fsig->params [0];
6617 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6619 g_assert (t->byref);
6620 /* t is a byref type, so the reference check is more complicated */
6621 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6622 if (t->type == MONO_TYPE_I1)
6623 opcode = OP_ATOMIC_LOAD_I1;
6624 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6625 opcode = OP_ATOMIC_LOAD_U1;
6626 else if (t->type == MONO_TYPE_I2)
6627 opcode = OP_ATOMIC_LOAD_I2;
6628 else if (t->type == MONO_TYPE_U2)
6629 opcode = OP_ATOMIC_LOAD_U2;
6630 else if (t->type == MONO_TYPE_I4)
6631 opcode = OP_ATOMIC_LOAD_I4;
6632 else if (t->type == MONO_TYPE_U4)
6633 opcode = OP_ATOMIC_LOAD_U4;
6634 else if (t->type == MONO_TYPE_R4)
6635 opcode = OP_ATOMIC_LOAD_R4;
6636 else if (t->type == MONO_TYPE_R8)
6637 opcode = OP_ATOMIC_LOAD_R8;
6638 #if SIZEOF_REGISTER == 8
6639 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6640 opcode = OP_ATOMIC_LOAD_I8;
6641 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6642 opcode = OP_ATOMIC_LOAD_U8;
6644 else if (t->type == MONO_TYPE_I)
6645 opcode = OP_ATOMIC_LOAD_I4;
6646 else if (is_ref || t->type == MONO_TYPE_U)
6647 opcode = OP_ATOMIC_LOAD_U4;
6651 if (!mono_arch_opcode_supported (opcode))
6654 MONO_INST_NEW (cfg, ins, opcode);
6655 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6656 ins->sreg1 = args [0]->dreg;
6657 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6658 MONO_ADD_INS (cfg->cbb, ins);
6661 case MONO_TYPE_BOOLEAN:
6668 ins->type = STACK_I4;
6672 ins->type = STACK_I8;
6676 #if SIZEOF_REGISTER == 8
6677 ins->type = STACK_I8;
6679 ins->type = STACK_I4;
6683 ins->type = cfg->r4_stack_type;
6686 ins->type = STACK_R8;
6690 ins->type = STACK_OBJ;
6696 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6698 MonoType *t = fsig->params [0];
6701 g_assert (t->byref);
6702 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6703 if (t->type == MONO_TYPE_I1)
6704 opcode = OP_ATOMIC_STORE_I1;
6705 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6706 opcode = OP_ATOMIC_STORE_U1;
6707 else if (t->type == MONO_TYPE_I2)
6708 opcode = OP_ATOMIC_STORE_I2;
6709 else if (t->type == MONO_TYPE_U2)
6710 opcode = OP_ATOMIC_STORE_U2;
6711 else if (t->type == MONO_TYPE_I4)
6712 opcode = OP_ATOMIC_STORE_I4;
6713 else if (t->type == MONO_TYPE_U4)
6714 opcode = OP_ATOMIC_STORE_U4;
6715 else if (t->type == MONO_TYPE_R4)
6716 opcode = OP_ATOMIC_STORE_R4;
6717 else if (t->type == MONO_TYPE_R8)
6718 opcode = OP_ATOMIC_STORE_R8;
6719 #if SIZEOF_REGISTER == 8
6720 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6721 opcode = OP_ATOMIC_STORE_I8;
6722 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6723 opcode = OP_ATOMIC_STORE_U8;
6725 else if (t->type == MONO_TYPE_I)
6726 opcode = OP_ATOMIC_STORE_I4;
6727 else if (is_ref || t->type == MONO_TYPE_U)
6728 opcode = OP_ATOMIC_STORE_U4;
6732 if (!mono_arch_opcode_supported (opcode))
6735 MONO_INST_NEW (cfg, ins, opcode);
6736 ins->dreg = args [0]->dreg;
6737 ins->sreg1 = args [1]->dreg;
6738 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6739 MONO_ADD_INS (cfg->cbb, ins);
6741 if (cfg->gen_write_barriers && is_ref)
6742 emit_write_barrier (cfg, args [0], args [1]);
6748 } else if (cmethod->klass->image == mono_defaults.corlib &&
6749 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6750 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6751 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6752 if (should_insert_brekpoint (cfg->method)) {
6753 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6755 MONO_INST_NEW (cfg, ins, OP_NOP);
6756 MONO_ADD_INS (cfg->cbb, ins);
6760 } else if (cmethod->klass->image == mono_defaults.corlib &&
6761 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6762 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6763 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6765 EMIT_NEW_ICONST (cfg, ins, 1);
6767 EMIT_NEW_ICONST (cfg, ins, 0);
6770 } else if (cmethod->klass->image == mono_defaults.corlib &&
6771 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6772 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6773 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6774 /* No stack walks are currently available, so implement this as an intrinsic */
6775 MonoInst *assembly_ins;
6777 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6778 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6781 } else if (cmethod->klass->image == mono_defaults.corlib &&
6782 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6783 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6784 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6785 /* No stack walks are currently available, so implement this as an intrinsic */
6786 MonoInst *method_ins;
6787 MonoMethod *declaring = cfg->method;
6789 /* This returns the declaring generic method */
6790 if (declaring->is_inflated)
6791 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6792 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6793 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6794 cfg->no_inline = TRUE;
6795 if (cfg->method != cfg->current_method)
6796 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6799 } else if (cmethod->klass == mono_defaults.math_class) {
6801 * There is general branchless code for Min/Max, but it does not work for
6803 * http://everything2.com/?node_id=1051618
6805 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6806 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6807 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6808 !strcmp (cmethod->klass->name, "Selector")) ||
6809 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6810 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6811 !strcmp (cmethod->klass->name, "Selector"))
6813 if (cfg->backend->have_objc_get_selector &&
6814 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6815 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6816 cfg->compile_aot && !cfg->llvm_only) {
6818 MonoJumpInfoToken *ji;
6823 cfg->exception_message = g_strdup ("GetHandle");
6824 cfg->disable_llvm = TRUE;
6826 if (args [0]->opcode == OP_GOT_ENTRY) {
6827 pi = (MonoInst *)args [0]->inst_p1;
6828 g_assert (pi->opcode == OP_PATCH_INFO);
6829 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6830 ji = (MonoJumpInfoToken *)pi->inst_p0;
6832 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6833 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6836 NULLIFY_INS (args [0]);
6839 s = mono_ldstr_checked (cfg->domain, ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6840 return_val_if_nok (&cfg->error, NULL);
6841 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6842 ins->dreg = mono_alloc_ireg (cfg);
6844 ins->inst_p0 = mono_string_to_utf8_checked (s, &cfg->error);
6845 return_val_if_nok (&cfg->error, NULL);
6846 MONO_ADD_INS (cfg->cbb, ins);
6851 #ifdef MONO_ARCH_SIMD_INTRINSICS
6852 if (cfg->opt & MONO_OPT_SIMD) {
6853 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6859 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6863 if (COMPILE_LLVM (cfg)) {
6864 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6869 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6873 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to redirect a call to METHOD to an internal
 * replacement.  Currently only String.InternalAllocateStr is handled: when
 * allocation profiling is off and MONO_OPT_SHARED is not set, the call is
 * replaced by a call to the managed GC string allocator.
 * NOTE(review): the fall-through path (no redirection) is outside this view;
 * presumably it returns NULL so the caller emits a normal call -- confirm.
 */
6876 inline static MonoInst*
6877 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6878 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6880 if (method->klass == mono_defaults.string_class) {
6881 /* managed string allocation support */
6882 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6883 MonoInst *iargs [2];
6884 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6885 MonoMethod *managed_alloc = NULL;
6887 g_assert (vtable); /* Should not fail since it is System.String */
6888 #ifndef MONO_CROSS_COMPILE
6889 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
6893 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6894 iargs [1] = args [0];
6895 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   For each argument of SIG (plus the implicit 'this' when present), create
 * an OP_LOCAL variable, publish it in cfg->args [i] and emit a store of the
 * incoming stack value *SP into it.  Used to set up callee arguments when
 * inlining.
 */
6902 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6904 MonoInst *store, *temp;
6907 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6908 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6911 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6912 * would be different than the MonoInst's used to represent arguments, and
6913 * the ldelema implementation can't deal with that.
6914 * Solution: When ldelema is used on an inline argument, create a var for
6915 * it, emit ldelema on that var, and emit the saving code below in
6916 * inline_method () if needed.
6918 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6919 cfg->args [i] = temp;
6920 /* This uses cfg->args [i] which is set by the preceding line */
6921 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6922 store->cil_code = sp [0]->cil_code;
6927 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6928 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6930 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callees whose full name begins with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The env var is read once and cached in a function-local static.
 */
6932 check_inline_called_method_name_limit (MonoMethod *called_method)
6935 static const char *limit = NULL;
6937 if (limit == NULL) {
6938 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6940 if (limit_string != NULL)
6941 limit = limit_string;
6946 if (limit [0] != '\0') {
6947 char *called_method_name = mono_method_full_name (called_method, TRUE);
6949 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6950 g_free (called_method_name);
6952 //return (strncmp_result <= 0);
6953 return (strncmp_result == 0);
6960 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: restrict inlining to call sites whose caller's full name
 * begins with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Mirrors
 * check_inline_called_method_name_limit () above, but for the caller side.
 */
6962 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6965 static const char *limit = NULL;
6967 if (limit == NULL) {
6968 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6969 if (limit_string != NULL) {
6970 limit = limit_string;
6976 if (limit [0] != '\0') {
6977 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6979 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6980 g_free (caller_method_name);
6982 //return (strncmp_result <= 0);
6983 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG to the default value of RTYPE:
 * NULL for pointer/reference types, 0 for integral types, 0.0 for R4/R8
 * and VZERO for value types.  The float/double zeros are loaded through
 * the function-local statics below, whose addresses are embedded in the
 * emitted OP_R4CONST/OP_R8CONST instructions.
 */
6991 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6993 static double r8_0 = 0.0;
6994 static float r4_0 = 0.0;
6998 rtype = mini_get_underlying_type (rtype);
7002 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
7003 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7004 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
7005 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7006 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
7007 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7008 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7009 ins->type = STACK_R4;
7010 ins->inst_p0 = (void*)&r4_0;
7012 MONO_ADD_INS (cfg->cbb, ins);
7013 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7014 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7015 ins->type = STACK_R8;
7016 ins->inst_p0 = (void*)&r8_0;
7018 MONO_ADD_INS (cfg->cbb, ins);
7019 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7020 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7021 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7022 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7023 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized NULL. */
7025 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder initializations:
 * these keep the IR well-formed without generating real initialization code.
 * Types not covered by a dummy opcode fall back to emit_init_rvar ().
 */
7030 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7034 rtype = mini_get_underlying_type (rtype);
7038 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7039 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7040 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7041 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7042 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7043 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7044 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7045 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7046 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7047 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7048 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7049 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7050 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7051 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7053 emit_init_rvar (cfg, dreg, rtype);
7057 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float a fresh
 * vreg is initialized first and then stored into the local; otherwise the
 * local's dreg is initialized directly (really with emit_init_rvar () when
 * INIT is TRUE, with dummies otherwise).
 */
7059 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7061 MonoInst *var = cfg->locals [local];
7062 if (COMPILE_SOFT_FLOAT (cfg)) {
7064 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7065 emit_init_rvar (cfg, reg, type);
7066 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7069 emit_init_rvar (cfg, var->dreg, type);
7071 emit_dummy_init_rvar (cfg, var->dreg, type);
7078 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD at the current emission point.  The caller's compile state
 * (locals, args, cil offsets, current bblock, generic context, ...) is saved,
 * mono_method_to_ir () is run on the callee between fresh start/end bblocks,
 * and the state is restored afterwards.  A cost in [0, 60) returned by
 * mono_method_to_ir () counts as success; inline_always and
 * AggressiveInlining bypass the cost limit.  On failure the newly created
 * bblocks are discarded.  Returns the inlining cost (negative on failure --
 * NOTE(review): the final return statements are elided from this view; the
 * success/abort return values should be confirmed against the full source).
 */
7081 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7082 guchar *ip, guint real_offset, gboolean inline_always)
7085 MonoInst *ins, *rvar = NULL;
7086 MonoMethodHeader *cheader;
7087 MonoBasicBlock *ebblock, *sbblock;
7089 MonoMethod *prev_inlined_method;
7090 MonoInst **prev_locals, **prev_args;
7091 MonoType **prev_arg_types;
7092 guint prev_real_offset;
7093 GHashTable *prev_cbb_hash;
7094 MonoBasicBlock **prev_cil_offset_to_bb;
7095 MonoBasicBlock *prev_cbb;
7096 unsigned char* prev_cil_start;
7097 guint32 prev_cil_offset_to_bb_len;
7098 MonoMethod *prev_current_method;
7099 MonoGenericContext *prev_generic_context;
7100 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7102 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
7104 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7105 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7108 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7109 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7114 fsig = mono_method_signature (cmethod);
7116 if (cfg->verbose_level > 2)
7117 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7119 if (!cmethod->inline_info) {
7120 cfg->stat_inlineable_methods++;
7121 cmethod->inline_info = 1;
7124 /* allocate local variables */
7125 cheader = mono_method_get_header_checked (cmethod, &error);
7127 if (inline_always) {
7128 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7129 mono_error_move (&cfg->error, &error);
7131 mono_error_cleanup (&error);
7136 /*Must verify before creating locals as it can cause the JIT to assert.*/
7137 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7138 mono_metadata_free_mh (cheader);
7142 /* allocate space to store the return value */
7143 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7144 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7147 prev_locals = cfg->locals;
7148 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7149 for (i = 0; i < cheader->num_locals; ++i)
7150 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7152 /* allocate start and end blocks */
7153 /* This is needed so if the inline is aborted, we can clean up */
7154 NEW_BBLOCK (cfg, sbblock);
7155 sbblock->real_offset = real_offset;
7157 NEW_BBLOCK (cfg, ebblock);
7158 ebblock->block_num = cfg->num_bblocks++;
7159 ebblock->real_offset = real_offset;
/* Save the compile state that mono_method_to_ir () will overwrite while
 * emitting the callee's IR. */
7161 prev_args = cfg->args;
7162 prev_arg_types = cfg->arg_types;
7163 prev_inlined_method = cfg->inlined_method;
7164 cfg->inlined_method = cmethod;
7165 cfg->ret_var_set = FALSE;
7166 cfg->inline_depth ++;
7167 prev_real_offset = cfg->real_offset;
7168 prev_cbb_hash = cfg->cbb_hash;
7169 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7170 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7171 prev_cil_start = cfg->cil_start;
7172 prev_cbb = cfg->cbb;
7173 prev_current_method = cfg->current_method;
7174 prev_generic_context = cfg->generic_context;
7175 prev_ret_var_set = cfg->ret_var_set;
7176 prev_disable_inline = cfg->disable_inline;
7178 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7181 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7183 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state. */
7185 cfg->inlined_method = prev_inlined_method;
7186 cfg->real_offset = prev_real_offset;
7187 cfg->cbb_hash = prev_cbb_hash;
7188 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7189 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7190 cfg->cil_start = prev_cil_start;
7191 cfg->locals = prev_locals;
7192 cfg->args = prev_args;
7193 cfg->arg_types = prev_arg_types;
7194 cfg->current_method = prev_current_method;
7195 cfg->generic_context = prev_generic_context;
7196 cfg->ret_var_set = prev_ret_var_set;
7197 cfg->disable_inline = prev_disable_inline;
7198 cfg->inline_depth --;
/* Success: cost within limit, forced inline, or AggressiveInlining. */
7200 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7201 if (cfg->verbose_level > 2)
7202 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7204 cfg->stat_inlined_methods++;
7206 /* always add some code to avoid block split failures */
7207 MONO_INST_NEW (cfg, ins, OP_NOP);
7208 MONO_ADD_INS (prev_cbb, ins);
7210 prev_cbb->next_bb = sbblock;
7211 link_bblock (cfg, prev_cbb, sbblock);
7214 * Get rid of the begin and end bblocks if possible to aid local
7217 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7219 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7220 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7222 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7223 MonoBasicBlock *prev = ebblock->in_bb [0];
7225 if (prev->next_bb == ebblock) {
7226 mono_merge_basic_blocks (cfg, prev, ebblock);
7228 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7229 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7230 cfg->cbb = prev_cbb;
7233 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7238 * It's possible that the rvar is set in some prev bblock, but not in others.
7244 for (i = 0; i < ebblock->in_count; ++i) {
7245 bb = ebblock->in_bb [i];
7247 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7250 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7260 * If the inlined method contains only a throw, then the ret var is not
7261 * set, so set it to a dummy value.
7264 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7266 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7269 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline failed: clear the recorded exception and drop the new bblocks. */
7272 if (cfg->verbose_level > 2)
7273 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7274 cfg->exception_type = MONO_EXCEPTION_NONE;
7276 /* This gets rid of the newly added bblocks */
7277 cfg->cbb = prev_cbb;
7279 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7284 * Some of these comments may well be out-of-date.
7285 * Design decisions: we do a single pass over the IL code (and we do bblock
7286 * splitting/merging in the few cases when it's required: a back jump to an IL
7287 * address that was not already seen as bblock starting point).
7288 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7289 * Complex operations are decomposed in simpler ones right away. We need to let the
7290 * arch-specific code peek and poke inside this process somehow (except when the
7291 * optimizations can take advantage of the full semantic info of coarse opcodes).
7292 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7293 * MonoInst->opcode initially is the IL opcode or some simplification of that
7294 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7295 * opcode with value bigger than OP_LAST.
7296 * At this point the IR can be handed over to an interpreter, a dumb code generator
7297 * or to the optimizing code generator that will translate it to SSA form.
7299 * Profile-directed optimizations.
7300 * We may compile by default with few or no optimizations and instrument the code
7301 * or the user may indicate what methods to optimize the most either in a config file
7302 * or through repeated runs where the compiler applies offline the optimizations to
7303 * each method and then decides if it was worth it.
7306 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7307 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7308 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7309 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7310 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7311 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7312 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7313 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7315 /* offset from br.s -> br like opcodes */
7316 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if IL address IP belongs to basic block BB, i.e. either no
 * bblock was registered for that IL offset or the registered one is BB.
 */
7319 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7321 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7323 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create (via GET_BBLOCK) a basic
 * block at every branch/switch target and at the instruction following a
 * branch.  Bblocks that contain a throw are marked out_of_line so they can
 * be laid out cold.
 */
7327 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7329 unsigned char *ip = start;
7330 unsigned char *target;
7333 MonoBasicBlock *bblock;
7334 const MonoOpcode *opcode;
/* Decode one opcode per iteration; cli_addr is the offset of the current
 * instruction relative to START. */
7337 cli_addr = ip - start;
7338 i = mono_opcode_value ((const guint8 **)&ip, end);
7341 opcode = &mono_opcodes [i];
7342 switch (opcode->argument) {
7343 case MonoInlineNone:
7346 case MonoInlineString:
7347 case MonoInlineType:
7348 case MonoInlineField:
7349 case MonoInlineMethod:
7352 case MonoShortInlineR:
7359 case MonoShortInlineVar:
7360 case MonoShortInlineI:
7363 case MonoShortInlineBrTarget:
7364 target = start + cli_addr + 2 + (signed char)ip [1];
7365 GET_BBLOCK (cfg, bblock, target);
/* A new bblock also starts right after the branch. */
7368 GET_BBLOCK (cfg, bblock, ip);
7370 case MonoInlineBrTarget:
7371 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7372 GET_BBLOCK (cfg, bblock, target);
7375 GET_BBLOCK (cfg, bblock, ip);
7377 case MonoInlineSwitch: {
7378 guint32 n = read32 (ip + 1);
/* The fall-through target follows the n switch entries. */
7381 cli_addr += 5 + 4 * n;
7382 target = start + cli_addr;
7383 GET_BBLOCK (cfg, bblock, target);
7385 for (j = 0; j < n; ++j) {
7386 target = start + cli_addr + (gint32)read32 (ip);
7387 GET_BBLOCK (cfg, bblock, target);
7397 g_assert_not_reached ();
/* Mark throw-containing bblocks as out of line (cold path). */
7400 if (i == CEE_THROW) {
7401 unsigned char *bb_start = ip - 1;
7403 /* Find the start of the bblock containing the throw */
7405 while ((bb_start >= start) && !bblock) {
7406 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7410 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN in the scope of M, inflating with
 * CONTEXT when needed.  For wrapper methods the lookup goes through the
 * wrapper data instead of the metadata tables.  Open constructed methods are
 * permitted (unlike mini_get_method () below).
 */
7420 static inline MonoMethod *
7421 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7425 mono_error_init (error);
7427 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7428 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7430 method = mono_class_inflate_generic_method_checked (method, context, error);
7433 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code
 * a method on an open constructed type is treated as a bad-image error.
 * Errors are reported into cfg->error when CFG is given, otherwise they are
 * cleaned up locally.
 */
7439 static inline MonoMethod *
7440 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7443 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7445 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7446 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7450 if (!method && !cfg)
7451 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN in the scope of METHOD, inflating
 * with CONTEXT.  Wrapper methods resolve through their wrapper data.  The
 * resolved class is initialized before being returned.
 */
7456 static inline MonoClass*
7457 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7462 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7463 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7465 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7466 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7469 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7470 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7473 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve the standalone signature referenced by TOKEN in the scope of
 * METHOD (wrapper data for wrappers, metadata parse otherwise), then inflate
 * it with CONTEXT.  Returns NULL on error with ERROR set.
 */
7477 static inline MonoMethodSignature*
7478 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7480 MonoMethodSignature *fsig;
7482 mono_error_init (error);
7483 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7484 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7486 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7487 return_val_if_nok (error, NULL);
7490 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return SecurityManager::ThrowException (1 arg), looked up once via the
 * security manager and cached in a function-local static.
 */
7496 throw_exception (void)
7498 static MonoMethod *method = NULL;
7501 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7502 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager::ThrowException (EX) at the current
 * emission point, passing the exception object as a pointer constant.
 */
7509 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7511 MonoMethod *thrower = throw_exception ();
7514 EMIT_NEW_PCONST (cfg, args [0], ex);
7515 mono_emit_method_call (cfg, thrower, args, NULL);
7519 * Return the original method if a wrapper is specified. We can only access
7520 * the custom attributes from the original method.
7523 get_original_method (MonoMethod *method)
7525 if (method->wrapper_type == MONO_WRAPPER_NONE)
7528 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7529 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7532 /* in other cases we need to find the original method */
7533 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the corresponding security exception at run time.
 */
7537 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7539 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7540 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7542 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the corresponding security exception at run time.
 */
7546 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7548 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7549 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7551 emit_throw_exception (cfg, ex);
7555 * Check that the IL instructions at ip are the array initialization
7556 * sequence and return the pointer to the data and the size.
 * On success *out_size and *out_field_token are filled in; for AOT the
 * field's RVA is returned instead of a direct data pointer so the lookup
 * can be redone at load time.
7559 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7562 * newarr[System.Int32]
7564 * ldtoken field valuetype ...
7565 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern-match: dup; ldtoken <field>; call InitializeArray. */
7567 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7569 guint32 token = read32 (ip + 7);
7570 guint32 field_token = read32 (ip + 2);
7571 guint32 field_index = field_token & 0xffffff;
7573 const char *data_ptr;
7575 MonoMethod *cmethod;
7576 MonoClass *dummy_class;
7577 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7581 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7585 *out_field_token = field_token;
7587 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must really be RuntimeHelpers::InitializeArray. */
7590 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7592 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7593 case MONO_TYPE_BOOLEAN:
7597 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7598 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7599 case MONO_TYPE_CHAR:
7616 if (size > mono_type_size (field->type, &dummy_align))
7619 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7620 if (!image_is_dynamic (method->klass->image)) {
7621 field_index = read32 (ip + 2) & 0xffffff;
7622 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7623 data_ptr = mono_image_rva_map (method->klass->image, rva);
7624 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7625 /* for aot code we do the lookup on load */
7626 if (aot && data_ptr)
7627 return (const char *)GUINT_TO_POINTER (rva);
7629 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7631 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD, including a disassembly of the offending instruction when the
 * method body can be parsed.
 */
7639 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7642 char *method_fname = mono_method_full_name (method, TRUE);
7644 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7647 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7648 mono_error_cleanup (&error);
7649 } else if (header->code_size == 0)
7650 method_code = g_strdup ("method body is empty.");
7652 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7653 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7654 g_free (method_fname);
7655 g_free (method_code);
7656 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit IR to store the value *SP into local variable N of HEADER.
 * If the store would be a plain register-to-register move and SP[0] is the
 * last instruction emitted into the current bblock (and is an ICONST or
 * I8CONST), retarget SP[0]'s destination register to the local's register
 * instead of emitting a separate move.
 */
7660 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7663 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7664 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7665 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7666 /* Optimize reg-reg moves away */
7668 * Can't optimize other opcodes, since sp[0] might point to
7669 * the last ins of a decomposed opcode.
/* Retarget the constant-producing ins directly at the local's register. */
7671 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* Generic path: a regular local store. */
7673 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7678 * ldloca inhibits many optimizations so try to get rid of it in common
7681 static inline unsigned char *
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for the "ldloca N; initobj T" pattern: instead of materializing
 * the local's address, initialize the local directly and (presumably) return
 * an advanced IL pointer so the caller skips the consumed opcodes — the
 * return statement is not visible in this listing; TODO confirm.
 * IP is expected to point just past the ldloca operand read below.
 */
7682 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Index of the local addressed by the (fat) ldloca — assumes a 16-bit
 * operand at ip + 2; confirm against the caller's encoding. */
7692 local = read16 (ip + 2);
/* Look ahead for CEE_PREFIX1 CEE_INITOBJ within the same basic block. */
7696 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7697 /* From the INITOBJ case */
7698 token = read32 (ip + 2);
7699 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7700 CHECK_TYPELOAD (klass);
7701 type = mini_get_underlying_type (&klass->byval_arg);
/* Initialize the local in place; no address is taken. */
7702 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call to CMETHOD (signature FSIG, receiver and
 * args in SP) for llvm-only mode, where vtable/IMT slots hold function
 * descriptors (address + extra arg pairs) instead of code addresses.
 * Four strategies are visible below, chosen by the kind of call:
 *   1. plain virtual call            — load the ftndesc from the vtable slot,
 *                                      lazily initializing it via an icall;
 *   2. simple (non-variant) iface    — call through the IMT thunk descriptor;
 *   3. generic-virtual / variant     — try the IMT thunk, fall back to a
 *                                      resolver icall for unseen instantiations;
 *   4. everything else (gsharedvt)   — resolve the target entirely via icall.
 * NOTE(review): interior lines are elided in this listing; comments cover
 * only the visible statements.
 */
7710 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7712 MonoInst *icall_args [16];
7713 MonoInst *call_target, *ins, *vtable_ins;
7714 int arg_reg, this_reg, vtable_reg;
7715 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7716 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7717 gboolean variant_iface = FALSE;
7722 * In llvm-only mode, vtables contain function descriptors instead of
7723 * method addresses/trampolines.
/* Explicit null check on the receiver before any vtable load. */
7725 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface calls index the IMT; class virtual calls index the vtable. */
7728 slot = mono_method_get_imt_slot (cmethod);
7730 slot = mono_method_get_vtable_index (cmethod);
7732 this_reg = sp [0]->dreg;
7734 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7735 variant_iface = TRUE;
7737 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7739 * The simplest case, a normal virtual call.
7741 int slot_reg = alloc_preg (cfg);
7742 int addr_reg = alloc_preg (cfg);
7743 int arg_reg = alloc_preg (cfg);
7744 MonoBasicBlock *non_null_bb;
7746 vtable_reg = alloc_preg (cfg);
7747 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7748 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7750 /* Load the vtable slot, which contains a function descriptor. */
7751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* Slot non-NULL is the expected (likely) fast path; NULL means it must
 * be initialized first via the icall below. */
7753 NEW_BBLOCK (cfg, non_null_bb);
7755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7756 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7757 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7760 // FIXME: Make the wrapper use the preserveall cconv
7761 // FIXME: Use one icall per slot for small slot numbers ?
7762 icall_args [0] = vtable_ins;
7763 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7764 /* Make the icall return the vtable slot value to save some code space */
7765 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7766 ins->dreg = slot_reg;
7767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7770 MONO_START_BB (cfg, non_null_bb);
7771 /* Load the address + arg from the vtable slot */
7772 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7775 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7778 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7780 * A simple interface call
7782 * We make a call through an imt slot to obtain the function descriptor we need to call.
7783 * The imt slot contains a function descriptor for a runtime function + arg.
7785 int slot_reg = alloc_preg (cfg);
7786 int addr_reg = alloc_preg (cfg);
7787 int arg_reg = alloc_preg (cfg);
7788 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7790 vtable_reg = alloc_preg (cfg);
7791 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7792 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7795 * The slot is already initialized when the vtable is created so there is no need
7799 /* Load the imt slot, which contains a function descriptor. */
7800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7802 /* Load the address + arg of the imt thunk from the imt slot */
7803 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7804 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7806 * IMT thunks in llvm-only mode are C functions which take an info argument
7807 * plus the imt method and return the ftndesc to call.
7809 icall_args [0] = thunk_arg_ins;
7810 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7811 cmethod, MONO_RGCTX_INFO_METHOD);
7812 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7814 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7817 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7819 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7820 * dynamically extended as more instantiations are discovered.
7821 * This handles generic virtual methods both on classes and interfaces.
7823 int slot_reg = alloc_preg (cfg);
7824 int addr_reg = alloc_preg (cfg);
7825 int arg_reg = alloc_preg (cfg);
7826 int ftndesc_reg = alloc_preg (cfg);
7827 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7828 MonoBasicBlock *slowpath_bb, *end_bb;
7830 NEW_BBLOCK (cfg, slowpath_bb);
7831 NEW_BBLOCK (cfg, end_bb);
7833 vtable_reg = alloc_preg (cfg);
7834 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface → IMT slot (negative offset); class → vtable slot. */
7836 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7838 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7840 /* Load the slot, which contains a function descriptor. */
7841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7843 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7844 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7845 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7846 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7849 /* Same as with iface calls */
7850 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7851 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7852 icall_args [0] = thunk_arg_ins;
7853 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7854 cmethod, MONO_RGCTX_INFO_METHOD);
7855 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7856 ftndesc_ins->dreg = ftndesc_reg;
7858 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7859 * they don't know about yet. Fall back to the slowpath in that case.
7861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7864 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7867 MONO_START_BB (cfg, slowpath_bb);
7868 icall_args [0] = vtable_ins;
7869 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7870 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7871 cmethod, MONO_RGCTX_INFO_METHOD);
7873 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7875 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7876 ftndesc_ins->dreg = ftndesc_reg;
7877 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7880 MONO_START_BB (cfg, end_bb);
7881 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7885 * Non-optimized cases
/* Fully dynamic resolution: only reached for gsharedvt (asserted below). */
7887 icall_args [0] = sp [0];
7888 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7890 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7891 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver writes the ftndesc's extra arg here. */
7893 arg_reg = alloc_preg (cfg);
7894 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7895 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7897 g_assert (is_gsharedvt);
7899 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7901 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7904 * Pass the extra argument even if the callee doesn't receive it, most
7905 * calling conventions allow this.
7907 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, walking
 * the ->parent chain.  The loop/return lines are elided in this listing.
 */
7911 is_exception_class (MonoClass *klass)
7914 if (klass == mono_defaults.exception_class)
7916 klass = klass->parent;
7922 * is_jit_optimizer_disabled:
7924 * Determine whenever M's assembly has a DebuggableAttribute with the
7925 * IsJITOptimizerDisabled flag set.
7928 is_jit_optimizer_disabled (MonoMethod *m)
7931 MonoAssembly *ass = m->klass->image->assembly;
7932 MonoCustomAttrInfo* attrs;
7935 gboolean val = FALSE;
/* Fast path: result cached per assembly; the barrier below pairs with
 * this unsynchronized read. */
7938 if (ass->jit_optimizer_disabled_inited)
7939 return ass->jit_optimizer_disabled;
7941 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type not available: cache FALSE and return.
 * The barrier orders the value store before the inited flag. */
7945 ass->jit_optimizer_disabled = FALSE;
7946 mono_memory_barrier ();
7947 ass->jit_optimizer_disabled_inited = TRUE;
7951 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7952 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly's custom attributes for DebuggableAttribute. */
7954 for (i = 0; i < attrs->num_attrs; ++i) {
7955 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7957 MonoMethodSignature *sig;
7959 if (!attr->ctor || attr->ctor->klass != klass)
7961 /* Decode the attribute. See reflection.c */
7962 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog. */
7963 g_assert (read16 (p) == 0x0001);
7966 // FIXME: Support named parameters
/* Only the (bool, bool) ctor overload is handled. */
7967 sig = mono_method_signature (attr->ctor);
7968 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7970 /* Two boolean arguments */
7974 mono_custom_attrs_free (attrs);
/* Publish the result; store value before the inited flag. */
7977 ass->jit_optimizer_disabled = val;
7978 mono_memory_barrier ();
7979 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (IL opcode CALL_OPCODE,
 * callee signature FSIG) can be compiled as a tail call.  Starts from the
 * architecture's verdict, then vetoes any case where the callee could see
 * pointers into the caller's (about to be popped) stack frame, or where the
 * runtime needs the caller's frame to stay live.
 */
7985 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7987 gboolean supported_tail_call;
7990 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7992 for (i = 0; i < fsig->param_count; ++i) {
7993 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7994 /* These can point to the current method's stack */
7995 supported_tail_call = FALSE;
7997 if (fsig->hasthis && cmethod->klass->valuetype)
7998 /* this might point to the current method's stack */
7999 supported_tail_call = FALSE;
/* P/Invoke, LMF-saving callers, and most wrappers need the caller frame. */
8000 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
8001 supported_tail_call = FALSE;
8002 if (cfg->method->save_lmf)
8003 supported_tail_call = FALSE;
8004 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
8005 supported_tail_call = FALSE;
/* Only a plain CEE_CALL may be converted to a tail call here. */
8006 if (call_opcode != CEE_CALL)
8007 supported_tail_call = FALSE;
8009 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call conversions. */
8011 if (supported_tail_call) {
8012 if (!mono_debug_count ())
8013 supported_tail_call = FALSE;
8017 return supported_tail_call;
8023 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit IR for the constructor call part of a NEWOBJ.  Depending on the
 * target ctor this: computes a vtable/mrgctx argument for shared generic
 * valuetype ctors, tries an intrinsic or inlining, or falls back to a direct
 * or indirect (calli) call.  INLINE_COSTS is updated when inlining succeeds.
 * NOTE(review): interior lines are elided in this listing.
 */
8026 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8027 MonoInst **sp, guint8 *ip, int *inline_costs)
8029 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared-generic valuetype ctor: it needs an explicit rgctx/vtable arg. */
8031 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8032 mono_method_is_generic_sharable (cmethod, TRUE)) {
8033 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8034 mono_class_vtable (cfg->domain, cmethod->klass);
8035 CHECK_TYPELOAD (cmethod->klass);
8037 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8038 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8041 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8042 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8044 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8046 CHECK_TYPELOAD (cmethod->klass);
8047 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8052 /* Avoid virtual calls to ctors if possible */
8053 if (mono_class_is_marshalbyref (cmethod->klass))
8054 callvirt_this_arg = sp [0];
/* First choice: a JIT intrinsic for this ctor (must return void). */
8056 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8057 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8058 CHECK_CFG_EXCEPTION;
/* Second choice: inline the ctor body (not for Exception subclasses). */
8059 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8060 mono_method_check_inlining (cfg, cmethod) &&
8061 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8064 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8065 cfg->real_offset += 5;
/* Inlining paid off: account for the saved call (5 = newobj size). */
8067 *inline_costs += costs - 5;
8069 INLINE_FAILURE ("inline failure");
8070 // FIXME-VT: Clean this up
8071 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8072 GSHAREDVT_FAILURE(*ip);
8073 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the out trampoline via calli. */
8075 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8078 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8080 if (cfg->llvm_only) {
8081 // FIXME: Avoid initializing vtable_arg
8082 emit_llvmonly_calli (cfg, fsig, sp, addr);
8084 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8086 } else if (context_used &&
8087 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8088 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8089 MonoInst *cmethod_addr;
8091 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8093 if (cfg->llvm_only) {
8094 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8096 emit_llvmonly_calli (cfg, fsig, sp, addr);
8098 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8099 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8101 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a regular (non-inlined) ctor call. */
8104 INLINE_FAILURE ("ctor call");
8105 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8106 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to return VAL from the current method (cfg->method).
 * Valuetype returns (mapped to CEE_STOBJ) are stored either into the
 * cfg->ret variable or through the hidden vret address; on soft-float
 * targets an R4 return is first converted via an icall before the
 * architecture-specific setret.
 */
8113 emit_setret (MonoCompile *cfg, MonoInst *val)
8115 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
8118 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden vret argument: store into the return variable directly. */
8121 if (!cfg->vret_addr) {
8122 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Otherwise store through the caller-provided return-value address. */
8124 EMIT_NEW_RETLOADA (cfg, ret_addr);
8126 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8127 ins->klass = mono_class_from_mono_type (ret_type);
8130 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall before setret. */
8131 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8132 MonoInst *iargs [1];
8136 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8137 mono_arch_emit_setret (cfg, cfg->method, conv);
8139 mono_arch_emit_setret (cfg, cfg->method, val);
8142 mono_arch_emit_setret (cfg, cfg->method, val);
8148 * mono_method_to_ir:
8150 * Translate the .net IL into linear IR.
8153 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8154 MonoInst *return_var, MonoInst **inline_args,
8155 guint inline_offset, gboolean is_virtual_call)
8158 MonoInst *ins, **sp, **stack_start;
8159 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8160 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8161 MonoMethod *cmethod, *method_definition;
8162 MonoInst **arg_array;
8163 MonoMethodHeader *header;
8165 guint32 token, ins_flag;
8167 MonoClass *constrained_class = NULL;
8168 unsigned char *ip, *end, *target, *err_pos;
8169 MonoMethodSignature *sig;
8170 MonoGenericContext *generic_context = NULL;
8171 MonoGenericContainer *generic_container = NULL;
8172 MonoType **param_types;
8173 int i, n, start_new_bblock, dreg;
8174 int num_calls = 0, inline_costs = 0;
8175 int breakpoint_id = 0;
8177 GSList *class_inits = NULL;
8178 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8180 gboolean init_locals, seq_points, skip_dead_blocks;
8181 gboolean sym_seq_points = FALSE;
8182 MonoDebugMethodInfo *minfo;
8183 MonoBitSet *seq_point_locs = NULL;
8184 MonoBitSet *seq_point_set_locs = NULL;
8186 cfg->disable_inline = is_jit_optimizer_disabled (method);
8188 /* serialization and xdomain stuff may need access to private fields and methods */
8189 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8190 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8191 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8192 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8193 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8194 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8196 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8197 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8198 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8199 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8200 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8202 image = method->klass->image;
8203 header = mono_method_get_header_checked (method, &cfg->error);
8205 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8206 goto exception_exit;
8208 generic_container = mono_method_get_generic_container (method);
8209 sig = mono_method_signature (method);
8210 num_args = sig->hasthis + sig->param_count;
8211 ip = (unsigned char*)header->code;
8212 cfg->cil_start = ip;
8213 end = ip + header->code_size;
8214 cfg->stat_cil_code_size += header->code_size;
8216 seq_points = cfg->gen_seq_points && cfg->method == method;
8218 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8219 /* We could hit a seq point before attaching to the JIT (#8338) */
8223 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8224 minfo = mono_debug_lookup_method (method);
8226 MonoSymSeqPoint *sps;
8227 int i, n_il_offsets;
8229 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8230 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8231 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8232 sym_seq_points = TRUE;
8233 for (i = 0; i < n_il_offsets; ++i) {
8234 if (sps [i].il_offset < header->code_size)
8235 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8238 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8239 /* Methods without line number info like auto-generated property accessors */
8240 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8241 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8242 sym_seq_points = TRUE;
8247 * Methods without init_locals set could cause asserts in various passes
8248 * (#497220). To work around this, we emit dummy initialization opcodes
8249 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8250 * on some platforms.
8252 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8253 init_locals = header->init_locals;
8257 method_definition = method;
8258 while (method_definition->is_inflated) {
8259 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8260 method_definition = imethod->declaring;
8263 /* SkipVerification is not allowed if core-clr is enabled */
8264 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8266 dont_verify_stloc = TRUE;
8269 if (sig->is_inflated)
8270 generic_context = mono_method_get_context (method);
8271 else if (generic_container)
8272 generic_context = &generic_container->context;
8273 cfg->generic_context = generic_context;
8276 g_assert (!sig->has_type_parameters);
8278 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8279 g_assert (method->is_inflated);
8280 g_assert (mono_method_get_context (method)->method_inst);
8282 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8283 g_assert (sig->generic_param_count);
8285 if (cfg->method == method) {
8286 cfg->real_offset = 0;
8288 cfg->real_offset = inline_offset;
8291 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8292 cfg->cil_offset_to_bb_len = header->code_size;
8294 cfg->current_method = method;
8296 if (cfg->verbose_level > 2)
8297 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8299 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8301 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8302 for (n = 0; n < sig->param_count; ++n)
8303 param_types [n + sig->hasthis] = sig->params [n];
8304 cfg->arg_types = param_types;
8306 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8307 if (cfg->method == method) {
8309 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8310 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8313 NEW_BBLOCK (cfg, start_bblock);
8314 cfg->bb_entry = start_bblock;
8315 start_bblock->cil_code = NULL;
8316 start_bblock->cil_length = 0;
8319 NEW_BBLOCK (cfg, end_bblock);
8320 cfg->bb_exit = end_bblock;
8321 end_bblock->cil_code = NULL;
8322 end_bblock->cil_length = 0;
8323 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8324 g_assert (cfg->num_bblocks == 2);
8326 arg_array = cfg->args;
8328 if (header->num_clauses) {
8329 cfg->spvars = g_hash_table_new (NULL, NULL);
8330 cfg->exvars = g_hash_table_new (NULL, NULL);
8332 /* handle exception clauses */
8333 for (i = 0; i < header->num_clauses; ++i) {
8334 MonoBasicBlock *try_bb;
8335 MonoExceptionClause *clause = &header->clauses [i];
8336 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8338 try_bb->real_offset = clause->try_offset;
8339 try_bb->try_start = TRUE;
8340 try_bb->region = ((i + 1) << 8) | clause->flags;
8341 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8342 tblock->real_offset = clause->handler_offset;
8343 tblock->flags |= BB_EXCEPTION_HANDLER;
8346 * Linking the try block with the EH block hinders inlining as we won't be able to
8347 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8349 if (COMPILE_LLVM (cfg))
8350 link_bblock (cfg, try_bb, tblock);
8352 if (*(ip + clause->handler_offset) == CEE_POP)
8353 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8355 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8356 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8357 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8358 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8359 MONO_ADD_INS (tblock, ins);
8361 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8362 /* finally clauses already have a seq point */
8363 /* seq points for filter clauses are emitted below */
8364 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8365 MONO_ADD_INS (tblock, ins);
8368 /* todo: is a fault block unsafe to optimize? */
8369 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8370 tblock->flags |= BB_EXCEPTION_UNSAFE;
8373 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8375 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8377 /* catch and filter blocks get the exception object on the stack */
8378 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8379 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8381 /* mostly like handle_stack_args (), but just sets the input args */
8382 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8383 tblock->in_scount = 1;
8384 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8385 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8389 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8390 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8391 if (!cfg->compile_llvm) {
8392 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8393 ins->dreg = tblock->in_stack [0]->dreg;
8394 MONO_ADD_INS (tblock, ins);
8397 MonoInst *dummy_use;
8400 * Add a dummy use for the exvar so its liveness info will be
8403 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8406 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8407 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8408 MONO_ADD_INS (tblock, ins);
8411 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8412 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8413 tblock->flags |= BB_EXCEPTION_HANDLER;
8414 tblock->real_offset = clause->data.filter_offset;
8415 tblock->in_scount = 1;
8416 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8417 /* The filter block shares the exvar with the handler block */
8418 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8419 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8420 MONO_ADD_INS (tblock, ins);
8424 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8425 clause->data.catch_class &&
8427 mono_class_check_context_used (clause->data.catch_class)) {
8429 * In shared generic code with catch
8430 * clauses containing type variables
8431 * the exception handling code has to
8432 * be able to get to the rgctx.
8433 * Therefore we have to make sure that
8434 * the vtable/mrgctx argument (for
8435 * static or generic methods) or the
8436 * "this" argument (for non-static
8437 * methods) are live.
8439 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8440 mini_method_get_context (method)->method_inst ||
8441 method->klass->valuetype) {
8442 mono_get_vtable_var (cfg);
8444 MonoInst *dummy_use;
8446 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8451 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8452 cfg->cbb = start_bblock;
8453 cfg->args = arg_array;
8454 mono_save_args (cfg, sig, inline_args);
8457 /* FIRST CODE BLOCK */
8458 NEW_BBLOCK (cfg, tblock);
8459 tblock->cil_code = ip;
8463 ADD_BBLOCK (cfg, tblock);
8465 if (cfg->method == method) {
8466 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8467 if (breakpoint_id) {
8468 MONO_INST_NEW (cfg, ins, OP_BREAK);
8469 MONO_ADD_INS (cfg->cbb, ins);
8473 /* we use a separate basic block for the initialization code */
8474 NEW_BBLOCK (cfg, init_localsbb);
8475 cfg->bb_init = init_localsbb;
8476 init_localsbb->real_offset = cfg->real_offset;
8477 start_bblock->next_bb = init_localsbb;
8478 init_localsbb->next_bb = cfg->cbb;
8479 link_bblock (cfg, start_bblock, init_localsbb);
8480 link_bblock (cfg, init_localsbb, cfg->cbb);
8482 cfg->cbb = init_localsbb;
8484 if (cfg->gsharedvt && cfg->method == method) {
8485 MonoGSharedVtMethodInfo *info;
8486 MonoInst *var, *locals_var;
8489 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8490 info->method = cfg->method;
8491 info->count_entries = 16;
8492 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8493 cfg->gsharedvt_info = info;
8495 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8496 /* prevent it from being register allocated */
8497 //var->flags |= MONO_INST_VOLATILE;
8498 cfg->gsharedvt_info_var = var;
8500 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8501 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8503 /* Allocate locals */
8504 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8505 /* prevent it from being register allocated */
8506 //locals_var->flags |= MONO_INST_VOLATILE;
8507 cfg->gsharedvt_locals_var = locals_var;
8509 dreg = alloc_ireg (cfg);
8510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8512 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8513 ins->dreg = locals_var->dreg;
8515 MONO_ADD_INS (cfg->cbb, ins);
8516 cfg->gsharedvt_locals_var_ins = ins;
8518 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8521 ins->flags |= MONO_INST_INIT;
8525 if (mono_security_core_clr_enabled ()) {
8526 /* check if this is native code, e.g. an icall or a p/invoke */
8527 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8528 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8530 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8531 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8533 /* if this ia a native call then it can only be JITted from platform code */
8534 if ((icall || pinvk) && method->klass && method->klass->image) {
8535 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8536 MonoException *ex = icall ? mono_get_exception_security () :
8537 mono_get_exception_method_access ();
8538 emit_throw_exception (cfg, ex);
8545 CHECK_CFG_EXCEPTION;
8547 if (header->code_size == 0)
8550 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8555 if (cfg->method == method)
8556 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8558 for (n = 0; n < header->num_locals; ++n) {
8559 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8564 /* We force the vtable variable here for all shared methods
8565 for the possibility that they might show up in a stack
8566 trace where their exact instantiation is needed. */
8567 if (cfg->gshared && method == cfg->method) {
8568 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8569 mini_method_get_context (method)->method_inst ||
8570 method->klass->valuetype) {
8571 mono_get_vtable_var (cfg);
8573 /* FIXME: Is there a better way to do this?
8574 We need the variable live for the duration
8575 of the whole method. */
8576 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8580 /* add a check for this != NULL to inlined methods */
8581 if (is_virtual_call) {
8584 NEW_ARGLOAD (cfg, arg_ins, 0);
8585 MONO_ADD_INS (cfg->cbb, arg_ins);
8586 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8589 skip_dead_blocks = !dont_verify;
8590 if (skip_dead_blocks) {
8591 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8596 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8597 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8600 start_new_bblock = 0;
8602 if (cfg->method == method)
8603 cfg->real_offset = ip - header->code;
8605 cfg->real_offset = inline_offset;
8610 if (start_new_bblock) {
8611 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8612 if (start_new_bblock == 2) {
8613 g_assert (ip == tblock->cil_code);
8615 GET_BBLOCK (cfg, tblock, ip);
8617 cfg->cbb->next_bb = tblock;
8619 start_new_bblock = 0;
8620 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8621 if (cfg->verbose_level > 3)
8622 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8623 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8627 g_slist_free (class_inits);
8630 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8631 link_bblock (cfg, cfg->cbb, tblock);
8632 if (sp != stack_start) {
8633 handle_stack_args (cfg, stack_start, sp - stack_start);
8635 CHECK_UNVERIFIABLE (cfg);
8637 cfg->cbb->next_bb = tblock;
8639 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8640 if (cfg->verbose_level > 3)
8641 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8642 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8645 g_slist_free (class_inits);
8650 if (skip_dead_blocks) {
8651 int ip_offset = ip - header->code;
8653 if (ip_offset == bb->end)
8657 int op_size = mono_opcode_size (ip, end);
8658 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8660 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8662 if (ip_offset + op_size == bb->end) {
8663 MONO_INST_NEW (cfg, ins, OP_NOP);
8664 MONO_ADD_INS (cfg->cbb, ins);
8665 start_new_bblock = 1;
8673 * Sequence points are points where the debugger can place a breakpoint.
8674 * Currently, we generate these automatically at points where the IL
8677 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8679 * Make methods interruptable at the beginning, and at the targets of
8680 * backward branches.
8681 * Also, do this at the start of every bblock in methods with clauses too,
8682 * to be able to handle instructions with imprecise control flow like
8684 * Backward branches are handled at the end of method-to-ir ().
8686 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8687 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8689 /* Avoid sequence points on empty IL like .volatile */
8690 // FIXME: Enable this
8691 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8692 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8693 if ((sp != stack_start) && !sym_seq_point)
8694 ins->flags |= MONO_INST_NONEMPTY_STACK;
8695 MONO_ADD_INS (cfg->cbb, ins);
8698 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8701 cfg->cbb->real_offset = cfg->real_offset;
8703 if ((cfg->method == method) && cfg->coverage_info) {
8704 guint32 cil_offset = ip - header->code;
8705 cfg->coverage_info->data [cil_offset].cil_code = ip;
8707 /* TODO: Use an increment here */
8708 #if defined(TARGET_X86)
8709 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8710 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8712 MONO_ADD_INS (cfg->cbb, ins);
8714 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8719 if (cfg->verbose_level > 3)
8720 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8724 if (seq_points && !sym_seq_points && sp != stack_start) {
8726 * The C# compiler uses these nops to notify the JIT that it should
8727 * insert seq points.
8729 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8730 MONO_ADD_INS (cfg->cbb, ins);
8732 if (cfg->keep_cil_nops)
8733 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8735 MONO_INST_NEW (cfg, ins, OP_NOP);
8737 MONO_ADD_INS (cfg->cbb, ins);
8740 if (should_insert_brekpoint (cfg->method)) {
8741 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8743 MONO_INST_NEW (cfg, ins, OP_NOP);
8746 MONO_ADD_INS (cfg->cbb, ins);
8752 CHECK_STACK_OVF (1);
8753 n = (*ip)-CEE_LDARG_0;
8755 EMIT_NEW_ARGLOAD (cfg, ins, n);
8763 CHECK_STACK_OVF (1);
8764 n = (*ip)-CEE_LDLOC_0;
8766 EMIT_NEW_LOCLOAD (cfg, ins, n);
8775 n = (*ip)-CEE_STLOC_0;
8778 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8780 emit_stloc_ir (cfg, sp, header, n);
8787 CHECK_STACK_OVF (1);
8790 EMIT_NEW_ARGLOAD (cfg, ins, n);
8796 CHECK_STACK_OVF (1);
8799 NEW_ARGLOADA (cfg, ins, n);
8800 MONO_ADD_INS (cfg->cbb, ins);
8810 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8812 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8817 CHECK_STACK_OVF (1);
8820 EMIT_NEW_LOCLOAD (cfg, ins, n);
8824 case CEE_LDLOCA_S: {
8825 unsigned char *tmp_ip;
8827 CHECK_STACK_OVF (1);
8828 CHECK_LOCAL (ip [1]);
8830 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8836 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8845 CHECK_LOCAL (ip [1]);
8846 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8848 emit_stloc_ir (cfg, sp, header, ip [1]);
8853 CHECK_STACK_OVF (1);
8854 EMIT_NEW_PCONST (cfg, ins, NULL);
8855 ins->type = STACK_OBJ;
8860 CHECK_STACK_OVF (1);
8861 EMIT_NEW_ICONST (cfg, ins, -1);
8874 CHECK_STACK_OVF (1);
8875 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8881 CHECK_STACK_OVF (1);
8883 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8889 CHECK_STACK_OVF (1);
8890 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8896 CHECK_STACK_OVF (1);
8897 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8898 ins->type = STACK_I8;
8899 ins->dreg = alloc_dreg (cfg, STACK_I8);
8901 ins->inst_l = (gint64)read64 (ip);
8902 MONO_ADD_INS (cfg->cbb, ins);
8908 gboolean use_aotconst = FALSE;
8910 #ifdef TARGET_POWERPC
8911 /* FIXME: Clean this up */
8912 if (cfg->compile_aot)
8913 use_aotconst = TRUE;
8916 /* FIXME: we should really allocate this only late in the compilation process */
8917 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8919 CHECK_STACK_OVF (1);
8925 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8927 dreg = alloc_freg (cfg);
8928 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8929 ins->type = cfg->r4_stack_type;
8931 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8932 ins->type = cfg->r4_stack_type;
8933 ins->dreg = alloc_dreg (cfg, STACK_R8);
8935 MONO_ADD_INS (cfg->cbb, ins);
8945 gboolean use_aotconst = FALSE;
8947 #ifdef TARGET_POWERPC
8948 /* FIXME: Clean this up */
8949 if (cfg->compile_aot)
8950 use_aotconst = TRUE;
8953 /* FIXME: we should really allocate this only late in the compilation process */
8954 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8956 CHECK_STACK_OVF (1);
8962 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8964 dreg = alloc_freg (cfg);
8965 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8966 ins->type = STACK_R8;
8968 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8969 ins->type = STACK_R8;
8970 ins->dreg = alloc_dreg (cfg, STACK_R8);
8972 MONO_ADD_INS (cfg->cbb, ins);
8981 MonoInst *temp, *store;
8983 CHECK_STACK_OVF (1);
8987 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8988 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8990 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8993 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9006 if (sp [0]->type == STACK_R8)
9007 /* we need to pop the value from the x86 FP stack */
9008 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
9013 MonoMethodSignature *fsig;
9016 INLINE_FAILURE ("jmp");
9017 GSHAREDVT_FAILURE (*ip);
9020 if (stack_start != sp)
9022 token = read32 (ip + 1);
9023 /* FIXME: check the signature matches */
9024 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9027 if (cfg->gshared && mono_method_check_context_used (cmethod))
9028 GENERIC_SHARING_FAILURE (CEE_JMP);
9030 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9032 fsig = mono_method_signature (cmethod);
9033 n = fsig->param_count + fsig->hasthis;
9034 if (cfg->llvm_only) {
9037 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9038 for (i = 0; i < n; ++i)
9039 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9040 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9042 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9043 * have to emit a normal return since llvm expects it.
9046 emit_setret (cfg, ins);
9047 MONO_INST_NEW (cfg, ins, OP_BR);
9048 ins->inst_target_bb = end_bblock;
9049 MONO_ADD_INS (cfg->cbb, ins);
9050 link_bblock (cfg, cfg->cbb, end_bblock);
9053 } else if (cfg->backend->have_op_tail_call) {
9054 /* Handle tail calls similarly to calls */
9057 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9058 call->method = cmethod;
9059 call->tail_call = TRUE;
9060 call->signature = mono_method_signature (cmethod);
9061 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9062 call->inst.inst_p0 = cmethod;
9063 for (i = 0; i < n; ++i)
9064 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9066 mono_arch_emit_call (cfg, call);
9067 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9068 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9070 for (i = 0; i < num_args; ++i)
9071 /* Prevent arguments from being optimized away */
9072 arg_array [i]->flags |= MONO_INST_VOLATILE;
9074 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9075 ins = (MonoInst*)call;
9076 ins->inst_p0 = cmethod;
9077 MONO_ADD_INS (cfg->cbb, ins);
9081 start_new_bblock = 1;
9086 MonoMethodSignature *fsig;
9089 token = read32 (ip + 1);
9093 //GSHAREDVT_FAILURE (*ip);
9098 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9101 if (method->dynamic && fsig->pinvoke) {
9105 * This is a call through a function pointer using a pinvoke
9106 * signature. Have to create a wrapper and call that instead.
9107 * FIXME: This is very slow, need to create a wrapper at JIT time
9108 * instead based on the signature.
9110 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9111 EMIT_NEW_PCONST (cfg, args [1], fsig);
9113 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9116 n = fsig->param_count + fsig->hasthis;
9120 //g_assert (!virtual_ || fsig->hasthis);
9124 inline_costs += 10 * num_calls++;
9127 * Making generic calls out of gsharedvt methods.
9128 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9129 * patching gshared method addresses into a gsharedvt method.
9131 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9133 * We pass the address to the gsharedvt trampoline in the rgctx reg
9135 MonoInst *callee = addr;
9137 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9139 GSHAREDVT_FAILURE (*ip);
9143 GSHAREDVT_FAILURE (*ip);
9145 addr = emit_get_rgctx_sig (cfg, context_used,
9146 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9147 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9151 /* Prevent inlining of methods with indirect calls */
9152 INLINE_FAILURE ("indirect call");
9154 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9155 MonoJumpInfoType info_type;
9159 * Instead of emitting an indirect call, emit a direct call
9160 * with the contents of the aotconst as the patch info.
9162 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9163 info_type = (MonoJumpInfoType)addr->inst_c1;
9164 info_data = addr->inst_p0;
9166 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9167 info_data = addr->inst_right->inst_left;
9170 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9171 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9174 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9175 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9180 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9184 /* End of call, INS should contain the result of the call, if any */
9186 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9188 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9191 CHECK_CFG_EXCEPTION;
9195 constrained_class = NULL;
9199 case CEE_CALLVIRT: {
9200 MonoInst *addr = NULL;
9201 MonoMethodSignature *fsig = NULL;
9203 int virtual_ = *ip == CEE_CALLVIRT;
9204 gboolean pass_imt_from_rgctx = FALSE;
9205 MonoInst *imt_arg = NULL;
9206 MonoInst *keep_this_alive = NULL;
9207 gboolean pass_vtable = FALSE;
9208 gboolean pass_mrgctx = FALSE;
9209 MonoInst *vtable_arg = NULL;
9210 gboolean check_this = FALSE;
9211 gboolean supported_tail_call = FALSE;
9212 gboolean tail_call = FALSE;
9213 gboolean need_seq_point = FALSE;
9214 guint32 call_opcode = *ip;
9215 gboolean emit_widen = TRUE;
9216 gboolean push_res = TRUE;
9217 gboolean skip_ret = FALSE;
9218 gboolean delegate_invoke = FALSE;
9219 gboolean direct_icall = FALSE;
9220 gboolean constrained_partial_call = FALSE;
9221 MonoMethod *cil_method;
9224 token = read32 (ip + 1);
9228 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9231 cil_method = cmethod;
9233 if (constrained_class) {
9234 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9235 if (!mini_is_gsharedvt_klass (constrained_class)) {
9236 g_assert (!cmethod->klass->valuetype);
9237 if (!mini_type_is_reference (&constrained_class->byval_arg))
9238 constrained_partial_call = TRUE;
9242 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9243 if (cfg->verbose_level > 2)
9244 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9245 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9246 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9248 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9252 if (cfg->verbose_level > 2)
9253 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9255 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9257 * This is needed since get_method_constrained can't find
9258 * the method in klass representing a type var.
9259 * The type var is guaranteed to be a reference type in this
9262 if (!mini_is_gsharedvt_klass (constrained_class))
9263 g_assert (!cmethod->klass->valuetype);
9265 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9271 if (!dont_verify && !cfg->skip_visibility) {
9272 MonoMethod *target_method = cil_method;
9273 if (method->is_inflated) {
9274 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9277 if (!mono_method_can_access_method (method_definition, target_method) &&
9278 !mono_method_can_access_method (method, cil_method))
9279 emit_method_access_failure (cfg, method, cil_method);
9282 if (mono_security_core_clr_enabled ())
9283 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9285 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9286 /* MS.NET seems to silently convert this to a callvirt */
9291 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9292 * converts to a callvirt.
9294 * tests/bug-515884.il is an example of this behavior
9296 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9297 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9298 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9302 if (!cmethod->klass->inited)
9303 if (!mono_class_init (cmethod->klass))
9304 TYPE_LOAD_ERROR (cmethod->klass);
9306 fsig = mono_method_signature (cmethod);
9309 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9310 mini_class_is_system_array (cmethod->klass)) {
9311 array_rank = cmethod->klass->rank;
9312 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9313 direct_icall = TRUE;
9314 } else if (fsig->pinvoke) {
9315 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9316 fsig = mono_method_signature (wrapper);
9317 } else if (constrained_class) {
9319 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9323 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9324 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9326 /* See code below */
9327 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9328 MonoBasicBlock *tbb;
9330 GET_BBLOCK (cfg, tbb, ip + 5);
9331 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9333 * We want to extend the try block to cover the call, but we can't do it if the
9334 * call is made directly since it's followed by an exception check.
9336 direct_icall = FALSE;
9340 mono_save_token_info (cfg, image, token, cil_method);
9342 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9343 need_seq_point = TRUE;
9345 /* Don't support calls made using type arguments for now */
9347 if (cfg->gsharedvt) {
9348 if (mini_is_gsharedvt_signature (fsig))
9349 GSHAREDVT_FAILURE (*ip);
9353 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9354 g_assert_not_reached ();
9356 n = fsig->param_count + fsig->hasthis;
9358 if (!cfg->gshared && cmethod->klass->generic_container)
9362 g_assert (!mono_method_check_context_used (cmethod));
9366 //g_assert (!virtual_ || fsig->hasthis);
9371 * We have the `constrained.' prefix opcode.
9373 if (constrained_class) {
9374 if (mini_is_gsharedvt_klass (constrained_class)) {
9375 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9376 /* The 'Own method' case below */
9377 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9378 /* 'The type parameter is instantiated as a reference type' case below. */
9380 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9381 CHECK_CFG_EXCEPTION;
9387 if (constrained_partial_call) {
9388 gboolean need_box = TRUE;
9391 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9392 * called method is not known at compile time either. The called method could end up being
9393 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9394 * to box the receiver.
9395 * A simple solution would be to box always and make a normal virtual call, but that would
9396 * be bad performance wise.
9398 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9400 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9405 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9406 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9407 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9408 ins->klass = constrained_class;
9409 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9410 CHECK_CFG_EXCEPTION;
9411 } else if (need_box) {
9413 MonoBasicBlock *is_ref_bb, *end_bb;
9414 MonoInst *nonbox_call;
9417 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9419 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9420 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9422 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9424 NEW_BBLOCK (cfg, is_ref_bb);
9425 NEW_BBLOCK (cfg, end_bb);
9427 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9429 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9432 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9434 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9437 MONO_START_BB (cfg, is_ref_bb);
9438 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9439 ins->klass = constrained_class;
9440 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9441 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9445 MONO_START_BB (cfg, end_bb);
9448 nonbox_call->dreg = ins->dreg;
9451 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9452 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9453 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9456 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9458 * The type parameter is instantiated as a valuetype,
9459 * but that type doesn't override the method we're
9460 * calling, so we need to box `this'.
9462 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9463 ins->klass = constrained_class;
9464 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9465 CHECK_CFG_EXCEPTION;
9466 } else if (!constrained_class->valuetype) {
9467 int dreg = alloc_ireg_ref (cfg);
9470 * The type parameter is instantiated as a reference
9471 * type. We have a managed pointer on the stack, so
9472 * we need to dereference it here.
9474 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9475 ins->type = STACK_OBJ;
9478 if (cmethod->klass->valuetype) {
9481 /* Interface method */
9484 mono_class_setup_vtable (constrained_class);
9485 CHECK_TYPELOAD (constrained_class);
9486 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9488 TYPE_LOAD_ERROR (constrained_class);
9489 slot = mono_method_get_vtable_slot (cmethod);
9491 TYPE_LOAD_ERROR (cmethod->klass);
9492 cmethod = constrained_class->vtable [ioffset + slot];
9494 if (cmethod->klass == mono_defaults.enum_class) {
9495 /* Enum implements some interfaces, so treat this as the first case */
9496 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9497 ins->klass = constrained_class;
9498 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9499 CHECK_CFG_EXCEPTION;
9504 constrained_class = NULL;
9507 if (check_call_signature (cfg, fsig, sp))
9510 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9511 delegate_invoke = TRUE;
9513 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9514 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9515 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9523 * If the callee is a shared method, then its static cctor
9524 * might not get called after the call was patched.
9526 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9527 emit_class_init (cfg, cmethod->klass);
9528 CHECK_TYPELOAD (cmethod->klass);
9531 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9534 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9536 context_used = mini_method_check_context_used (cfg, cmethod);
9538 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9539 /* Generic method interface
9540 calls are resolved via a
9541 helper function and don't
9543 if (!cmethod_context || !cmethod_context->method_inst)
9544 pass_imt_from_rgctx = TRUE;
9548 * If a shared method calls another
9549 * shared method then the caller must
9550 * have a generic sharing context
9551 * because the magic trampoline
9552 * requires it. FIXME: We shouldn't
9553 * have to force the vtable/mrgctx
9554 * variable here. Instead there
9555 * should be a flag in the cfg to
9556 * request a generic sharing context.
9559 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9560 mono_get_vtable_var (cfg);
9565 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9567 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9569 CHECK_TYPELOAD (cmethod->klass);
9570 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9575 g_assert (!vtable_arg);
9577 if (!cfg->compile_aot) {
9579 * emit_get_rgctx_method () calls mono_class_vtable () so check
9580 * for type load errors before.
9582 mono_class_setup_vtable (cmethod->klass);
9583 CHECK_TYPELOAD (cmethod->klass);
9586 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9588 /* !marshalbyref is needed to properly handle generic methods + remoting */
9589 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9590 MONO_METHOD_IS_FINAL (cmethod)) &&
9591 !mono_class_is_marshalbyref (cmethod->klass)) {
9598 if (pass_imt_from_rgctx) {
9599 g_assert (!pass_vtable);
9601 imt_arg = emit_get_rgctx_method (cfg, context_used,
9602 cmethod, MONO_RGCTX_INFO_METHOD);
9606 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9608 /* Calling virtual generic methods */
9609 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9610 !(MONO_METHOD_IS_FINAL (cmethod) &&
9611 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9612 fsig->generic_param_count &&
9613 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9615 MonoInst *this_temp, *this_arg_temp, *store;
9616 MonoInst *iargs [4];
9618 g_assert (fsig->is_inflated);
9620 /* Prevent inlining of methods that contain indirect calls */
9621 INLINE_FAILURE ("virtual generic call");
9623 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9624 GSHAREDVT_FAILURE (*ip);
9626 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9627 g_assert (!imt_arg);
9629 g_assert (cmethod->is_inflated);
9630 imt_arg = emit_get_rgctx_method (cfg, context_used,
9631 cmethod, MONO_RGCTX_INFO_METHOD);
9632 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9634 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9635 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9636 MONO_ADD_INS (cfg->cbb, store);
9638 /* FIXME: This should be a managed pointer */
9639 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9641 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9642 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9643 cmethod, MONO_RGCTX_INFO_METHOD);
9644 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9645 addr = mono_emit_jit_icall (cfg,
9646 mono_helper_compile_generic_method, iargs);
9648 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9650 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9657 * Implement a workaround for the inherent races involved in locking:
9663 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9664 * try block, the Exit () won't be executed, see:
9665 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9666 * To work around this, we extend such try blocks to include the last x bytes
9667 * of the Monitor.Enter () call.
9669 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9670 MonoBasicBlock *tbb;
9672 GET_BBLOCK (cfg, tbb, ip + 5);
9674 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9675 * from Monitor.Enter like ArgumentNullException.
9677 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9678 /* Mark this bblock as needing to be extended */
9679 tbb->extend_try_block = TRUE;
9683 /* Conversion to a JIT intrinsic */
9684 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9685 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9686 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9694 if ((cfg->opt & MONO_OPT_INLINE) &&
9695 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9696 mono_method_check_inlining (cfg, cmethod)) {
9698 gboolean always = FALSE;
9700 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9701 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9702 /* Prevent inlining of methods that call wrappers */
9703 INLINE_FAILURE ("wrapper call");
9704 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9708 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9710 cfg->real_offset += 5;
9712 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9713 /* *sp is already set by inline_method */
9718 inline_costs += costs;
9724 /* Tail recursion elimination */
9725 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9726 gboolean has_vtargs = FALSE;
9729 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9730 INLINE_FAILURE ("tail call");
9732 /* keep it simple */
9733 for (i = fsig->param_count - 1; i >= 0; i--) {
9734 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9739 for (i = 0; i < n; ++i)
9740 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9741 MONO_INST_NEW (cfg, ins, OP_BR);
9742 MONO_ADD_INS (cfg->cbb, ins);
9743 tblock = start_bblock->out_bb [0];
9744 link_bblock (cfg, cfg->cbb, tblock);
9745 ins->inst_target_bb = tblock;
9746 start_new_bblock = 1;
9748 /* skip the CEE_RET, too */
9749 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9756 inline_costs += 10 * num_calls++;
9759 * Making generic calls out of gsharedvt methods.
9760 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9761 * patching gshared method addresses into a gsharedvt method.
9763 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9764 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9765 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9766 MonoRgctxInfoType info_type;
9769 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9770 //GSHAREDVT_FAILURE (*ip);
9771 // disable for possible remoting calls
9772 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9773 GSHAREDVT_FAILURE (*ip);
9774 if (fsig->generic_param_count) {
9775 /* virtual generic call */
9776 g_assert (!imt_arg);
9777 /* Same as the virtual generic case above */
9778 imt_arg = emit_get_rgctx_method (cfg, context_used,
9779 cmethod, MONO_RGCTX_INFO_METHOD);
9780 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9782 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9783 /* This can happen when we call a fully instantiated iface method */
9784 imt_arg = emit_get_rgctx_method (cfg, context_used,
9785 cmethod, MONO_RGCTX_INFO_METHOD);
9790 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9791 keep_this_alive = sp [0];
9793 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9794 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9796 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9797 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9799 if (cfg->llvm_only) {
9800 // FIXME: Avoid initializing vtable_arg
9801 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9803 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9808 /* Generic sharing */
9811 * Use this if the callee is gsharedvt sharable too, since
9812 * at runtime we might find an instantiation so the call cannot
9813 * be patched (the 'no_patch' code path in mini-trampolines.c).
9815 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9816 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9817 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9818 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9819 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9820 INLINE_FAILURE ("gshared");
9822 g_assert (cfg->gshared && cmethod);
9826 * We are compiling a call to a
9827 * generic method from shared code,
9828 * which means that we have to look up
9829 * the method in the rgctx and do an
9833 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9835 if (cfg->llvm_only) {
9836 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9837 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9839 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9840 // FIXME: Avoid initializing imt_arg/vtable_arg
9841 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9843 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9844 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9849 /* Direct calls to icalls */
9851 MonoMethod *wrapper;
9854 /* Inline the wrapper */
9855 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9857 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9858 g_assert (costs > 0);
9859 cfg->real_offset += 5;
9861 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9862 /* *sp is already set by inline_method */
9867 inline_costs += costs;
9876 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9877 MonoInst *val = sp [fsig->param_count];
9879 if (val->type == STACK_OBJ) {
9880 MonoInst *iargs [2];
9885 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9888 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9889 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9890 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9891 emit_write_barrier (cfg, addr, val);
9892 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9893 GSHAREDVT_FAILURE (*ip);
9894 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9895 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9897 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9898 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9899 if (!cmethod->klass->element_class->valuetype && !readonly)
9900 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9901 CHECK_TYPELOAD (cmethod->klass);
9904 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9907 g_assert_not_reached ();
9914 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9918 /* Tail prefix / tail call optimization */
9920 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9921 /* FIXME: runtime generic context pointer for jumps? */
9922 /* FIXME: handle this for generic sharing eventually */
9923 if ((ins_flag & MONO_INST_TAILCALL) &&
9924 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9925 supported_tail_call = TRUE;
9927 if (supported_tail_call) {
9930 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9931 INLINE_FAILURE ("tail call");
9933 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9935 if (cfg->backend->have_op_tail_call) {
9936 /* Handle tail calls similarly to normal calls */
9939 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9941 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9942 call->tail_call = TRUE;
9943 call->method = cmethod;
9944 call->signature = mono_method_signature (cmethod);
9947 * We implement tail calls by storing the actual arguments into the
9948 * argument variables, then emitting a CEE_JMP.
9950 for (i = 0; i < n; ++i) {
9951 /* Prevent argument from being register allocated */
9952 arg_array [i]->flags |= MONO_INST_VOLATILE;
9953 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9955 ins = (MonoInst*)call;
9956 ins->inst_p0 = cmethod;
9957 ins->inst_p1 = arg_array [0];
9958 MONO_ADD_INS (cfg->cbb, ins);
9959 link_bblock (cfg, cfg->cbb, end_bblock);
9960 start_new_bblock = 1;
9962 // FIXME: Eliminate unreachable epilogs
9965 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9966 * only reachable from this call.
9968 GET_BBLOCK (cfg, tblock, ip + 5);
9969 if (tblock == cfg->cbb || tblock->in_count == 0)
9978 * Synchronized wrappers.
9979 * It's hard to determine where to replace a method with its synchronized
9980 * wrapper without causing an infinite recursion. The current solution is
9981 * to add the synchronized wrapper in the trampolines, and to
9982 * change the called method to a dummy wrapper, and resolve that wrapper
9983 * to the real method in mono_jit_compile_method ().
9985 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9986 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9987 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9988 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9992 * Virtual calls in llvm-only mode.
9994 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9995 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
10000 INLINE_FAILURE ("call");
10001 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
10002 imt_arg, vtable_arg);
10004 if (tail_call && !cfg->llvm_only) {
10005 link_bblock (cfg, cfg->cbb, end_bblock);
10006 start_new_bblock = 1;
10008 // FIXME: Eliminate unreachable epilogs
10011 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10012 * only reachable from this call.
10014 GET_BBLOCK (cfg, tblock, ip + 5);
10015 if (tblock == cfg->cbb || tblock->in_count == 0)
10022 /* End of call, INS should contain the result of the call, if any */
10024 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10027 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10032 if (keep_this_alive) {
10033 MonoInst *dummy_use;
10035 /* See mono_emit_method_call_full () */
10036 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10039 CHECK_CFG_EXCEPTION;
10043 g_assert (*ip == CEE_RET);
10047 constrained_class = NULL;
10048 if (need_seq_point)
10049 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10053 if (cfg->method != method) {
10054 /* return from inlined method */
10056 * If in_count == 0, that means the ret is unreachable due to
10057 * being preceded by a throw. In that case, inline_method () will
10058 * handle setting the return value
10059 * (test case: test_0_inline_throw ()).
10061 if (return_var && cfg->cbb->in_count) {
10062 MonoType *ret_type = mono_method_signature (method)->ret;
10068 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10071 //g_assert (returnvar != -1);
10072 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10073 cfg->ret_var_set = TRUE;
10076 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10078 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10079 emit_pop_lmf (cfg);
10082 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10084 if (seq_points && !sym_seq_points) {
10086 * Place a seq point here too even though the IL stack is not
10087 * empty, so a step over on
10090 * will work correctly.
10092 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10093 MONO_ADD_INS (cfg->cbb, ins);
10096 g_assert (!return_var);
10100 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10103 emit_setret (cfg, *sp);
10106 if (sp != stack_start)
10108 MONO_INST_NEW (cfg, ins, OP_BR);
10110 ins->inst_target_bb = end_bblock;
10111 MONO_ADD_INS (cfg->cbb, ins);
10112 link_bblock (cfg, cfg->cbb, end_bblock);
10113 start_new_bblock = 1;
10117 MONO_INST_NEW (cfg, ins, OP_BR);
10119 target = ip + 1 + (signed char)(*ip);
10121 GET_BBLOCK (cfg, tblock, target);
10122 link_bblock (cfg, cfg->cbb, tblock);
10123 ins->inst_target_bb = tblock;
10124 if (sp != stack_start) {
10125 handle_stack_args (cfg, stack_start, sp - stack_start);
10127 CHECK_UNVERIFIABLE (cfg);
10129 MONO_ADD_INS (cfg->cbb, ins);
10130 start_new_bblock = 1;
10131 inline_costs += BRANCH_COST;
10145 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10147 target = ip + 1 + *(signed char*)ip;
10150 ADD_BINCOND (NULL);
10153 inline_costs += BRANCH_COST;
10157 MONO_INST_NEW (cfg, ins, OP_BR);
10160 target = ip + 4 + (gint32)read32(ip);
10162 GET_BBLOCK (cfg, tblock, target);
10163 link_bblock (cfg, cfg->cbb, tblock);
10164 ins->inst_target_bb = tblock;
10165 if (sp != stack_start) {
10166 handle_stack_args (cfg, stack_start, sp - stack_start);
10168 CHECK_UNVERIFIABLE (cfg);
10171 MONO_ADD_INS (cfg->cbb, ins);
10173 start_new_bblock = 1;
10174 inline_costs += BRANCH_COST;
10176 case CEE_BRFALSE_S:
10181 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10182 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10183 guint32 opsize = is_short ? 1 : 4;
10185 CHECK_OPSIZE (opsize);
10187 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10190 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10195 GET_BBLOCK (cfg, tblock, target);
10196 link_bblock (cfg, cfg->cbb, tblock);
10197 GET_BBLOCK (cfg, tblock, ip);
10198 link_bblock (cfg, cfg->cbb, tblock);
10200 if (sp != stack_start) {
10201 handle_stack_args (cfg, stack_start, sp - stack_start);
10202 CHECK_UNVERIFIABLE (cfg);
10205 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10206 cmp->sreg1 = sp [0]->dreg;
10207 type_from_op (cfg, cmp, sp [0], NULL);
10210 #if SIZEOF_REGISTER == 4
10211 if (cmp->opcode == OP_LCOMPARE_IMM) {
10212 /* Convert it to OP_LCOMPARE */
10213 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10214 ins->type = STACK_I8;
10215 ins->dreg = alloc_dreg (cfg, STACK_I8);
10217 MONO_ADD_INS (cfg->cbb, ins);
10218 cmp->opcode = OP_LCOMPARE;
10219 cmp->sreg2 = ins->dreg;
10222 MONO_ADD_INS (cfg->cbb, cmp);
10224 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10225 type_from_op (cfg, ins, sp [0], NULL);
10226 MONO_ADD_INS (cfg->cbb, ins);
10227 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10228 GET_BBLOCK (cfg, tblock, target);
10229 ins->inst_true_bb = tblock;
10230 GET_BBLOCK (cfg, tblock, ip);
10231 ins->inst_false_bb = tblock;
10232 start_new_bblock = 2;
10235 inline_costs += BRANCH_COST;
10250 MONO_INST_NEW (cfg, ins, *ip);
10252 target = ip + 4 + (gint32)read32(ip);
10255 ADD_BINCOND (NULL);
10258 inline_costs += BRANCH_COST;
10262 MonoBasicBlock **targets;
10263 MonoBasicBlock *default_bblock;
10264 MonoJumpInfoBBTable *table;
10265 int offset_reg = alloc_preg (cfg);
10266 int target_reg = alloc_preg (cfg);
10267 int table_reg = alloc_preg (cfg);
10268 int sum_reg = alloc_preg (cfg);
10269 gboolean use_op_switch;
10273 n = read32 (ip + 1);
10276 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10280 CHECK_OPSIZE (n * sizeof (guint32));
10281 target = ip + n * sizeof (guint32);
10283 GET_BBLOCK (cfg, default_bblock, target);
10284 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10286 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10287 for (i = 0; i < n; ++i) {
10288 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10289 targets [i] = tblock;
10290 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10294 if (sp != stack_start) {
10296 * Link the current bb with the targets as well, so handle_stack_args
10297 * will set their in_stack correctly.
10299 link_bblock (cfg, cfg->cbb, default_bblock);
10300 for (i = 0; i < n; ++i)
10301 link_bblock (cfg, cfg->cbb, targets [i]);
10303 handle_stack_args (cfg, stack_start, sp - stack_start);
10305 CHECK_UNVERIFIABLE (cfg);
10307 /* Undo the links */
10308 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10309 for (i = 0; i < n; ++i)
10310 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10313 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10314 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10316 for (i = 0; i < n; ++i)
10317 link_bblock (cfg, cfg->cbb, targets [i]);
10319 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10320 table->table = targets;
10321 table->table_size = n;
10323 use_op_switch = FALSE;
10325 /* ARM implements SWITCH statements differently */
10326 /* FIXME: Make it use the generic implementation */
10327 if (!cfg->compile_aot)
10328 use_op_switch = TRUE;
10331 if (COMPILE_LLVM (cfg))
10332 use_op_switch = TRUE;
10334 cfg->cbb->has_jump_table = 1;
10336 if (use_op_switch) {
10337 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10338 ins->sreg1 = src1->dreg;
10339 ins->inst_p0 = table;
10340 ins->inst_many_bb = targets;
10341 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10342 MONO_ADD_INS (cfg->cbb, ins);
10344 if (sizeof (gpointer) == 8)
10345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10349 #if SIZEOF_REGISTER == 8
10350 /* The upper word might not be zero, and we add it to a 64 bit address later */
10351 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10354 if (cfg->compile_aot) {
10355 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10357 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10358 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10359 ins->inst_p0 = table;
10360 ins->dreg = table_reg;
10361 MONO_ADD_INS (cfg->cbb, ins);
10364 /* FIXME: Use load_memindex */
10365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10367 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10369 start_new_bblock = 1;
10370 inline_costs += (BRANCH_COST * 2);
10383 case CEE_LDIND_REF:
10390 dreg = alloc_freg (cfg);
10393 dreg = alloc_lreg (cfg);
10395 case CEE_LDIND_REF:
10396 dreg = alloc_ireg_ref (cfg);
10399 dreg = alloc_preg (cfg);
10402 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10403 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10404 if (*ip == CEE_LDIND_R4)
10405 ins->type = cfg->r4_stack_type;
10406 ins->flags |= ins_flag;
10407 MONO_ADD_INS (cfg->cbb, ins);
10409 if (ins_flag & MONO_INST_VOLATILE) {
10410 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10411 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10416 case CEE_STIND_REF:
10427 if (ins_flag & MONO_INST_VOLATILE) {
10428 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10429 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10432 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10433 ins->flags |= ins_flag;
10436 MONO_ADD_INS (cfg->cbb, ins);
10438 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10439 emit_write_barrier (cfg, sp [0], sp [1]);
10448 MONO_INST_NEW (cfg, ins, (*ip));
10450 ins->sreg1 = sp [0]->dreg;
10451 ins->sreg2 = sp [1]->dreg;
10452 type_from_op (cfg, ins, sp [0], sp [1]);
10454 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10456 /* Use the immediate opcodes if possible */
10457 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10458 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10459 if (imm_opcode != -1) {
10460 ins->opcode = imm_opcode;
10461 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10464 NULLIFY_INS (sp [1]);
10468 MONO_ADD_INS ((cfg)->cbb, (ins));
10470 *sp++ = mono_decompose_opcode (cfg, ins);
10487 MONO_INST_NEW (cfg, ins, (*ip));
10489 ins->sreg1 = sp [0]->dreg;
10490 ins->sreg2 = sp [1]->dreg;
10491 type_from_op (cfg, ins, sp [0], sp [1]);
10493 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10494 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10496 /* FIXME: Pass opcode to is_inst_imm */
10498 /* Use the immediate opcodes if possible */
10499 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10500 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10501 if (imm_opcode != -1) {
10502 ins->opcode = imm_opcode;
10503 if (sp [1]->opcode == OP_I8CONST) {
10504 #if SIZEOF_REGISTER == 8
10505 ins->inst_imm = sp [1]->inst_l;
10507 ins->inst_ls_word = sp [1]->inst_ls_word;
10508 ins->inst_ms_word = sp [1]->inst_ms_word;
10512 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10515 /* Might be followed by an instruction added by add_widen_op */
10516 if (sp [1]->next == NULL)
10517 NULLIFY_INS (sp [1]);
10520 MONO_ADD_INS ((cfg)->cbb, (ins));
10522 *sp++ = mono_decompose_opcode (cfg, ins);
10535 case CEE_CONV_OVF_I8:
10536 case CEE_CONV_OVF_U8:
10537 case CEE_CONV_R_UN:
10540 /* Special case this earlier so we have long constants in the IR */
10541 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10542 int data = sp [-1]->inst_c0;
10543 sp [-1]->opcode = OP_I8CONST;
10544 sp [-1]->type = STACK_I8;
10545 #if SIZEOF_REGISTER == 8
10546 if ((*ip) == CEE_CONV_U8)
10547 sp [-1]->inst_c0 = (guint32)data;
10549 sp [-1]->inst_c0 = data;
10551 sp [-1]->inst_ls_word = data;
10552 if ((*ip) == CEE_CONV_U8)
10553 sp [-1]->inst_ms_word = 0;
10555 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10557 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10564 case CEE_CONV_OVF_I4:
10565 case CEE_CONV_OVF_I1:
10566 case CEE_CONV_OVF_I2:
10567 case CEE_CONV_OVF_I:
10568 case CEE_CONV_OVF_U:
10571 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10572 ADD_UNOP (CEE_CONV_OVF_I8);
10579 case CEE_CONV_OVF_U1:
10580 case CEE_CONV_OVF_U2:
10581 case CEE_CONV_OVF_U4:
10584 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10585 ADD_UNOP (CEE_CONV_OVF_U8);
10592 case CEE_CONV_OVF_I1_UN:
10593 case CEE_CONV_OVF_I2_UN:
10594 case CEE_CONV_OVF_I4_UN:
10595 case CEE_CONV_OVF_I8_UN:
10596 case CEE_CONV_OVF_U1_UN:
10597 case CEE_CONV_OVF_U2_UN:
10598 case CEE_CONV_OVF_U4_UN:
10599 case CEE_CONV_OVF_U8_UN:
10600 case CEE_CONV_OVF_I_UN:
10601 case CEE_CONV_OVF_U_UN:
10608 CHECK_CFG_EXCEPTION;
10612 case CEE_ADD_OVF_UN:
10614 case CEE_MUL_OVF_UN:
10616 case CEE_SUB_OVF_UN:
10622 GSHAREDVT_FAILURE (*ip);
10625 token = read32 (ip + 1);
10626 klass = mini_get_class (method, token, generic_context);
10627 CHECK_TYPELOAD (klass);
10629 if (generic_class_is_reference_type (cfg, klass)) {
10630 MonoInst *store, *load;
10631 int dreg = alloc_ireg_ref (cfg);
10633 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10634 load->flags |= ins_flag;
10635 MONO_ADD_INS (cfg->cbb, load);
10637 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10638 store->flags |= ins_flag;
10639 MONO_ADD_INS (cfg->cbb, store);
10641 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10642 emit_write_barrier (cfg, sp [0], sp [1]);
10644 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10650 int loc_index = -1;
10656 token = read32 (ip + 1);
10657 klass = mini_get_class (method, token, generic_context);
10658 CHECK_TYPELOAD (klass);
10660 /* Optimize the common ldobj+stloc combination */
10663 loc_index = ip [6];
10670 loc_index = ip [5] - CEE_STLOC_0;
10677 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10678 CHECK_LOCAL (loc_index);
10680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10681 ins->dreg = cfg->locals [loc_index]->dreg;
10682 ins->flags |= ins_flag;
10685 if (ins_flag & MONO_INST_VOLATILE) {
10686 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10687 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10693 /* Optimize the ldobj+stobj combination */
10694 /* The reference case ends up being a load+store anyway */
10695 /* Skip this if the operation is volatile. */
10696 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10701 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10708 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10709 ins->flags |= ins_flag;
10712 if (ins_flag & MONO_INST_VOLATILE) {
10713 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10714 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10723 CHECK_STACK_OVF (1);
10725 n = read32 (ip + 1);
10727 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10728 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10729 ins->type = STACK_OBJ;
10732 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10733 MonoInst *iargs [1];
10734 char *str = (char *)mono_method_get_wrapper_data (method, n);
10736 if (cfg->compile_aot)
10737 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10739 EMIT_NEW_PCONST (cfg, iargs [0], str);
10740 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10742 if (cfg->opt & MONO_OPT_SHARED) {
10743 MonoInst *iargs [3];
10745 if (cfg->compile_aot) {
10746 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10748 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10749 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10750 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10751 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10752 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10755 if (cfg->cbb->out_of_line) {
10756 MonoInst *iargs [2];
10758 if (image == mono_defaults.corlib) {
10760 * Avoid relocations in AOT and save some space by using a
10761 * version of helper_ldstr specialized to mscorlib.
10763 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10764 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10766 /* Avoid creating the string object */
10767 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10768 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10769 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10773 if (cfg->compile_aot) {
10774 NEW_LDSTRCONST (cfg, ins, image, n);
10776 MONO_ADD_INS (cfg->cbb, ins);
10779 NEW_PCONST (cfg, ins, NULL);
10780 ins->type = STACK_OBJ;
10781 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10785 OUT_OF_MEMORY_FAILURE;
10788 MONO_ADD_INS (cfg->cbb, ins);
10797 MonoInst *iargs [2];
10798 MonoMethodSignature *fsig;
10801 MonoInst *vtable_arg = NULL;
10804 token = read32 (ip + 1);
10805 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10808 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10811 mono_save_token_info (cfg, image, token, cmethod);
10813 if (!mono_class_init (cmethod->klass))
10814 TYPE_LOAD_ERROR (cmethod->klass);
10816 context_used = mini_method_check_context_used (cfg, cmethod);
10818 if (mono_security_core_clr_enabled ())
10819 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10821 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10822 emit_class_init (cfg, cmethod->klass);
10823 CHECK_TYPELOAD (cmethod->klass);
10827 if (cfg->gsharedvt) {
10828 if (mini_is_gsharedvt_variable_signature (sig))
10829 GSHAREDVT_FAILURE (*ip);
10833 n = fsig->param_count;
10837 * Generate smaller code for the common newobj <exception> instruction in
10838 * argument checking code.
10840 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10841 is_exception_class (cmethod->klass) && n <= 2 &&
10842 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10843 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10844 MonoInst *iargs [3];
10848 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10851 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10854 iargs [1] = sp [0];
10855 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10858 iargs [1] = sp [0];
10859 iargs [2] = sp [1];
10860 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10863 g_assert_not_reached ();
10871 /* move the args to allow room for 'this' in the first position */
10877 /* check_call_signature () requires sp[0] to be set */
10878 this_ins.type = STACK_OBJ;
10879 sp [0] = &this_ins;
10880 if (check_call_signature (cfg, fsig, sp))
10885 if (mini_class_is_system_array (cmethod->klass)) {
10886 *sp = emit_get_rgctx_method (cfg, context_used,
10887 cmethod, MONO_RGCTX_INFO_METHOD);
10889 /* Avoid varargs in the common case */
10890 if (fsig->param_count == 1)
10891 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10892 else if (fsig->param_count == 2)
10893 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10894 else if (fsig->param_count == 3)
10895 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10896 else if (fsig->param_count == 4)
10897 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10899 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10900 } else if (cmethod->string_ctor) {
10901 g_assert (!context_used);
10902 g_assert (!vtable_arg);
10903 /* we simply pass a null pointer */
10904 EMIT_NEW_PCONST (cfg, *sp, NULL);
10905 /* now call the string ctor */
10906 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10908 if (cmethod->klass->valuetype) {
10909 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10910 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10911 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10916 * The code generated by mini_emit_virtual_call () expects
10917 * iargs [0] to be a boxed instance, but luckily the vcall
10918 * will be transformed into a normal call there.
10920 } else if (context_used) {
10921 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10924 MonoVTable *vtable = NULL;
10926 if (!cfg->compile_aot)
10927 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10928 CHECK_TYPELOAD (cmethod->klass);
10931 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10932 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10933 * As a workaround, we call class cctors before allocating objects.
10935 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10936 emit_class_init (cfg, cmethod->klass);
10937 if (cfg->verbose_level > 2)
10938 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10939 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10942 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10945 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10948 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10950 /* Now call the actual ctor */
10951 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10952 CHECK_CFG_EXCEPTION;
10955 if (alloc == NULL) {
10957 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10958 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10966 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10967 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10970 case CEE_CASTCLASS:
10974 token = read32 (ip + 1);
10975 klass = mini_get_class (method, token, generic_context);
10976 CHECK_TYPELOAD (klass);
10977 if (sp [0]->type != STACK_OBJ)
10980 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10981 CHECK_CFG_EXCEPTION;
10990 token = read32 (ip + 1);
10991 klass = mini_get_class (method, token, generic_context);
10992 CHECK_TYPELOAD (klass);
10993 if (sp [0]->type != STACK_OBJ)
10996 context_used = mini_class_check_context_used (cfg, klass);
10998 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10999 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
11000 MonoInst *args [3];
11007 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
11010 idx = get_castclass_cache_idx (cfg);
11011 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
11013 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
11016 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
11017 MonoMethod *mono_isinst;
11018 MonoInst *iargs [1];
11021 mono_isinst = mono_marshal_get_isinst (klass);
11022 iargs [0] = sp [0];
11024 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
11025 iargs, ip, cfg->real_offset, TRUE);
11026 CHECK_CFG_EXCEPTION;
11027 g_assert (costs > 0);
11030 cfg->real_offset += 5;
11034 inline_costs += costs;
11037 ins = handle_isinst (cfg, klass, *sp, context_used);
11038 CHECK_CFG_EXCEPTION;
11044 case CEE_UNBOX_ANY: {
11045 MonoInst *res, *addr;
11050 token = read32 (ip + 1);
11051 klass = mini_get_class (method, token, generic_context);
11052 CHECK_TYPELOAD (klass);
11054 mono_save_token_info (cfg, image, token, klass);
11056 context_used = mini_class_check_context_used (cfg, klass);
11058 if (mini_is_gsharedvt_klass (klass)) {
11059 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11061 } else if (generic_class_is_reference_type (cfg, klass)) {
11062 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
11063 CHECK_CFG_EXCEPTION;
11064 } else if (mono_class_is_nullable (klass)) {
11065 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11067 addr = handle_unbox (cfg, klass, sp, context_used);
11069 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11080 MonoClass *enum_class;
11081 MonoMethod *has_flag;
11087 token = read32 (ip + 1);
11088 klass = mini_get_class (method, token, generic_context);
11089 CHECK_TYPELOAD (klass);
11091 mono_save_token_info (cfg, image, token, klass);
11093 context_used = mini_class_check_context_used (cfg, klass);
11095 if (generic_class_is_reference_type (cfg, klass)) {
11101 if (klass == mono_defaults.void_class)
11103 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11105 /* frequent check in generic code: box (struct), brtrue */
11110 * <push int/long ptr>
11113 * constrained. MyFlags
11114 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11116 * If we find this sequence and the operand types on box and constrained
11117 * are equal, we can emit a specialized instruction sequence instead of
11118 * the very slow HasFlag () call.
11120 if ((cfg->opt & MONO_OPT_INTRINS) &&
11121 /* Cheap checks first. */
11122 ip + 5 + 6 + 5 < end &&
11123 ip [5] == CEE_PREFIX1 &&
11124 ip [6] == CEE_CONSTRAINED_ &&
11125 ip [11] == CEE_CALLVIRT &&
11126 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11127 mono_class_is_enum (klass) &&
11128 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11129 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11130 has_flag->klass == mono_defaults.enum_class &&
11131 !strcmp (has_flag->name, "HasFlag") &&
11132 has_flag->signature->hasthis &&
11133 has_flag->signature->param_count == 1) {
11134 CHECK_TYPELOAD (enum_class);
11136 if (enum_class == klass) {
11137 MonoInst *enum_this, *enum_flag;
11142 enum_this = sp [0];
11143 enum_flag = sp [1];
11145 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11150 // FIXME: LLVM can't handle the inconsistent bb linking
11151 if (!mono_class_is_nullable (klass) &&
11152 !mini_is_gsharedvt_klass (klass) &&
11153 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11154 (ip [5] == CEE_BRTRUE ||
11155 ip [5] == CEE_BRTRUE_S ||
11156 ip [5] == CEE_BRFALSE ||
11157 ip [5] == CEE_BRFALSE_S)) {
11158 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11160 MonoBasicBlock *true_bb, *false_bb;
11164 if (cfg->verbose_level > 3) {
11165 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11166 printf ("<box+brtrue opt>\n");
11171 case CEE_BRFALSE_S:
11174 target = ip + 1 + (signed char)(*ip);
11181 target = ip + 4 + (gint)(read32 (ip));
11185 g_assert_not_reached ();
11189 * We need to link both bblocks, since it is needed for handling stack
11190 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11191 * Branching to only one of them would lead to inconsistencies, so
11192 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11194 GET_BBLOCK (cfg, true_bb, target);
11195 GET_BBLOCK (cfg, false_bb, ip);
11197 mono_link_bblock (cfg, cfg->cbb, true_bb);
11198 mono_link_bblock (cfg, cfg->cbb, false_bb);
11200 if (sp != stack_start) {
11201 handle_stack_args (cfg, stack_start, sp - stack_start);
11203 CHECK_UNVERIFIABLE (cfg);
11206 if (COMPILE_LLVM (cfg)) {
11207 dreg = alloc_ireg (cfg);
11208 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11211 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11213 /* The JIT can't eliminate the iconst+compare */
11214 MONO_INST_NEW (cfg, ins, OP_BR);
11215 ins->inst_target_bb = is_true ? true_bb : false_bb;
11216 MONO_ADD_INS (cfg->cbb, ins);
11219 start_new_bblock = 1;
11223 *sp++ = handle_box (cfg, val, klass, context_used);
11225 CHECK_CFG_EXCEPTION;
11234 token = read32 (ip + 1);
11235 klass = mini_get_class (method, token, generic_context);
11236 CHECK_TYPELOAD (klass);
11238 mono_save_token_info (cfg, image, token, klass);
11240 context_used = mini_class_check_context_used (cfg, klass);
11242 if (mono_class_is_nullable (klass)) {
11245 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11246 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11250 ins = handle_unbox (cfg, klass, sp, context_used);
11263 MonoClassField *field;
11264 #ifndef DISABLE_REMOTING
11268 gboolean is_instance;
11270 gpointer addr = NULL;
11271 gboolean is_special_static;
11273 MonoInst *store_val = NULL;
11274 MonoInst *thread_ins;
11277 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11279 if (op == CEE_STFLD) {
11282 store_val = sp [1];
11287 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11289 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11292 if (op == CEE_STSFLD) {
11295 store_val = sp [0];
11300 token = read32 (ip + 1);
11301 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11302 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11303 klass = field->parent;
11306 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11309 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11310 FIELD_ACCESS_FAILURE (method, field);
11311 mono_class_init (klass);
11313 /* if the class is Critical then transparent code cannot access its fields */
11314 if (!is_instance && mono_security_core_clr_enabled ())
11315 ensure_method_is_allowed_to_access_field (cfg, method, field);
11317 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11318 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11319 if (mono_security_core_clr_enabled ())
11320 ensure_method_is_allowed_to_access_field (cfg, method, field);
11323 ftype = mono_field_get_type (field);
11326 * LDFLD etc. is usable on static fields as well, so convert those cases to
11329 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11341 g_assert_not_reached ();
11343 is_instance = FALSE;
11346 context_used = mini_class_check_context_used (cfg, klass);
11348 /* INSTANCE CASE */
11350 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11351 if (op == CEE_STFLD) {
11352 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11354 #ifndef DISABLE_REMOTING
11355 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11356 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11357 MonoInst *iargs [5];
11359 GSHAREDVT_FAILURE (op);
11361 iargs [0] = sp [0];
11362 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11363 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11364 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11366 iargs [4] = sp [1];
11368 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11369 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11370 iargs, ip, cfg->real_offset, TRUE);
11371 CHECK_CFG_EXCEPTION;
11372 g_assert (costs > 0);
11374 cfg->real_offset += 5;
11376 inline_costs += costs;
11378 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11383 MonoInst *store, *wbarrier_ptr_ins = NULL;
11385 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11387 if (mini_is_gsharedvt_klass (klass)) {
11388 MonoInst *offset_ins;
11390 context_used = mini_class_check_context_used (cfg, klass);
11392 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11393 /* The value is offset by 1 */
11394 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11395 dreg = alloc_ireg_mp (cfg);
11396 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11397 wbarrier_ptr_ins = ins;
11398 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11399 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11401 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11403 if (sp [0]->opcode != OP_LDADDR)
11404 store->flags |= MONO_INST_FAULT;
11406 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11407 if (mini_is_gsharedvt_klass (klass)) {
11408 g_assert (wbarrier_ptr_ins);
11409 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11411 /* insert call to write barrier */
11415 dreg = alloc_ireg_mp (cfg);
11416 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11417 emit_write_barrier (cfg, ptr, sp [1]);
11421 store->flags |= ins_flag;
11428 #ifndef DISABLE_REMOTING
11429 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11430 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11431 MonoInst *iargs [4];
11433 GSHAREDVT_FAILURE (op);
11435 iargs [0] = sp [0];
11436 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11437 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11438 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11439 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11440 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11441 iargs, ip, cfg->real_offset, TRUE);
11442 CHECK_CFG_EXCEPTION;
11443 g_assert (costs > 0);
11445 cfg->real_offset += 5;
11449 inline_costs += costs;
11451 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11457 if (sp [0]->type == STACK_VTYPE) {
11460 /* Have to compute the address of the variable */
11462 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11464 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11466 g_assert (var->klass == klass);
11468 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11472 if (op == CEE_LDFLDA) {
11473 if (sp [0]->type == STACK_OBJ) {
11474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11475 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11478 dreg = alloc_ireg_mp (cfg);
11480 if (mini_is_gsharedvt_klass (klass)) {
11481 MonoInst *offset_ins;
11483 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11484 /* The value is offset by 1 */
11485 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11486 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11488 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11490 ins->klass = mono_class_from_mono_type (field->type);
11491 ins->type = STACK_MP;
11496 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11498 if (mini_is_gsharedvt_klass (klass)) {
11499 MonoInst *offset_ins;
11501 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11502 /* The value is offset by 1 */
11503 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11504 dreg = alloc_ireg_mp (cfg);
11505 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11506 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11508 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11510 load->flags |= ins_flag;
11511 if (sp [0]->opcode != OP_LDADDR)
11512 load->flags |= MONO_INST_FAULT;
11524 context_used = mini_class_check_context_used (cfg, klass);
11526 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11527 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11531 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11532 * to be called here.
11534 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11535 mono_class_vtable (cfg->domain, klass);
11536 CHECK_TYPELOAD (klass);
11538 mono_domain_lock (cfg->domain);
11539 if (cfg->domain->special_static_fields)
11540 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11541 mono_domain_unlock (cfg->domain);
11543 is_special_static = mono_class_field_is_special_static (field);
11545 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11546 thread_ins = mono_get_thread_intrinsic (cfg);
11550 /* Generate IR to compute the field address */
11551 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11553 * Fast access to TLS data
11554 * Inline version of get_thread_static_data () in
11558 int idx, static_data_reg, array_reg, dreg;
11560 GSHAREDVT_FAILURE (op);
11562 MONO_ADD_INS (cfg->cbb, thread_ins);
11563 static_data_reg = alloc_ireg (cfg);
11564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11566 if (cfg->compile_aot) {
11567 int offset_reg, offset2_reg, idx_reg;
11569 /* For TLS variables, this will return the TLS offset */
11570 EMIT_NEW_SFLDACONST (cfg, ins, field);
11571 offset_reg = ins->dreg;
11572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11573 idx_reg = alloc_ireg (cfg);
11574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11576 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11577 array_reg = alloc_ireg (cfg);
11578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11579 offset2_reg = alloc_ireg (cfg);
11580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11582 dreg = alloc_ireg (cfg);
11583 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11585 offset = (gsize)addr & 0x7fffffff;
11586 idx = offset & 0x3f;
11588 array_reg = alloc_ireg (cfg);
11589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11590 dreg = alloc_ireg (cfg);
11591 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11593 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11594 (cfg->compile_aot && is_special_static) ||
11595 (context_used && is_special_static)) {
11596 MonoInst *iargs [2];
11598 g_assert (field->parent);
11599 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11600 if (context_used) {
11601 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11602 field, MONO_RGCTX_INFO_CLASS_FIELD);
11604 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11606 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11607 } else if (context_used) {
11608 MonoInst *static_data;
11611 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11612 method->klass->name_space, method->klass->name, method->name,
11613 depth, field->offset);
11616 if (mono_class_needs_cctor_run (klass, method))
11617 emit_class_init (cfg, klass);
11620 * The pointer we're computing here is
11622 * super_info.static_data + field->offset
11624 static_data = emit_get_rgctx_klass (cfg, context_used,
11625 klass, MONO_RGCTX_INFO_STATIC_DATA);
11627 if (mini_is_gsharedvt_klass (klass)) {
11628 MonoInst *offset_ins;
11630 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11631 /* The value is offset by 1 */
11632 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11633 dreg = alloc_ireg_mp (cfg);
11634 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11635 } else if (field->offset == 0) {
11638 int addr_reg = mono_alloc_preg (cfg);
11639 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11641 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11642 MonoInst *iargs [2];
11644 g_assert (field->parent);
11645 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11646 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11647 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11649 MonoVTable *vtable = NULL;
11651 if (!cfg->compile_aot)
11652 vtable = mono_class_vtable (cfg->domain, klass);
11653 CHECK_TYPELOAD (klass);
11656 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11657 if (!(g_slist_find (class_inits, klass))) {
11658 emit_class_init (cfg, klass);
11659 if (cfg->verbose_level > 2)
11660 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11661 class_inits = g_slist_prepend (class_inits, klass);
11664 if (cfg->run_cctors) {
11665 /* This makes it so that inlining cannot trigger */
11666 /* .cctors: too many apps depend on them */
11667 /* running with a specific order... */
11669 if (! vtable->initialized)
11670 INLINE_FAILURE ("class init");
11671 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11672 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11673 goto exception_exit;
11677 if (cfg->compile_aot)
11678 EMIT_NEW_SFLDACONST (cfg, ins, field);
11681 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11683 EMIT_NEW_PCONST (cfg, ins, addr);
11686 MonoInst *iargs [1];
11687 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11688 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11692 /* Generate IR to do the actual load/store operation */
11694 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11695 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11696 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11699 if (op == CEE_LDSFLDA) {
11700 ins->klass = mono_class_from_mono_type (ftype);
11701 ins->type = STACK_PTR;
11703 } else if (op == CEE_STSFLD) {
11706 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11707 store->flags |= ins_flag;
11709 gboolean is_const = FALSE;
11710 MonoVTable *vtable = NULL;
11711 gpointer addr = NULL;
11713 if (!context_used) {
11714 vtable = mono_class_vtable (cfg->domain, klass);
11715 CHECK_TYPELOAD (klass);
11717 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11718 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11719 int ro_type = ftype->type;
11721 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11722 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11723 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11726 GSHAREDVT_FAILURE (op);
11728 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11731 case MONO_TYPE_BOOLEAN:
11733 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11737 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11740 case MONO_TYPE_CHAR:
11742 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11746 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11751 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11755 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11760 case MONO_TYPE_PTR:
11761 case MONO_TYPE_FNPTR:
11762 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11763 type_to_eval_stack_type ((cfg), field->type, *sp);
11766 case MONO_TYPE_STRING:
11767 case MONO_TYPE_OBJECT:
11768 case MONO_TYPE_CLASS:
11769 case MONO_TYPE_SZARRAY:
11770 case MONO_TYPE_ARRAY:
11771 if (!mono_gc_is_moving ()) {
11772 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11773 type_to_eval_stack_type ((cfg), field->type, *sp);
11781 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11786 case MONO_TYPE_VALUETYPE:
11796 CHECK_STACK_OVF (1);
11798 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11799 load->flags |= ins_flag;
11805 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11806 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11807 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11818 token = read32 (ip + 1);
11819 klass = mini_get_class (method, token, generic_context);
11820 CHECK_TYPELOAD (klass);
11821 if (ins_flag & MONO_INST_VOLATILE) {
11822 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11823 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11825 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11826 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11827 ins->flags |= ins_flag;
11828 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11829 generic_class_is_reference_type (cfg, klass)) {
11830 /* insert call to write barrier */
11831 emit_write_barrier (cfg, sp [0], sp [1]);
11843 const char *data_ptr;
11845 guint32 field_token;
11851 token = read32 (ip + 1);
11853 klass = mini_get_class (method, token, generic_context);
11854 CHECK_TYPELOAD (klass);
11856 context_used = mini_class_check_context_used (cfg, klass);
11858 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11859 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11860 ins->sreg1 = sp [0]->dreg;
11861 ins->type = STACK_I4;
11862 ins->dreg = alloc_ireg (cfg);
11863 MONO_ADD_INS (cfg->cbb, ins);
11864 *sp = mono_decompose_opcode (cfg, ins);
11867 if (context_used) {
11868 MonoInst *args [3];
11869 MonoClass *array_class = mono_array_class_get (klass, 1);
11870 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11872 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11875 args [0] = emit_get_rgctx_klass (cfg, context_used,
11876 array_class, MONO_RGCTX_INFO_VTABLE);
11881 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11883 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11885 if (cfg->opt & MONO_OPT_SHARED) {
11886 /* Decompose now to avoid problems with references to the domainvar */
11887 MonoInst *iargs [3];
11889 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11890 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11891 iargs [2] = sp [0];
11893 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11895 /* Decompose later since it is needed by abcrem */
11896 MonoClass *array_type = mono_array_class_get (klass, 1);
11897 mono_class_vtable (cfg->domain, array_type);
11898 CHECK_TYPELOAD (array_type);
11900 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11901 ins->dreg = alloc_ireg_ref (cfg);
11902 ins->sreg1 = sp [0]->dreg;
11903 ins->inst_newa_class = klass;
11904 ins->type = STACK_OBJ;
11905 ins->klass = array_type;
11906 MONO_ADD_INS (cfg->cbb, ins);
11907 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11908 cfg->cbb->has_array_access = TRUE;
11910 /* Needed so mono_emit_load_get_addr () gets called */
11911 mono_get_got_var (cfg);
11921 * we inline/optimize the initialization sequence if possible.
11922 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11923 * for small sizes open code the memcpy
11924 * ensure the rva field is big enough
11926 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11927 MonoMethod *memcpy_method = get_memcpy_method ();
11928 MonoInst *iargs [3];
11929 int add_reg = alloc_ireg_mp (cfg);
11931 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11932 if (cfg->compile_aot) {
11933 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11935 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11937 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11938 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11947 if (sp [0]->type != STACK_OBJ)
11950 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11951 ins->dreg = alloc_preg (cfg);
11952 ins->sreg1 = sp [0]->dreg;
11953 ins->type = STACK_I4;
11954 /* This flag will be inherited by the decomposition */
11955 ins->flags |= MONO_INST_FAULT;
11956 MONO_ADD_INS (cfg->cbb, ins);
11957 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11958 cfg->cbb->has_array_access = TRUE;
11966 if (sp [0]->type != STACK_OBJ)
11969 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11971 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11972 CHECK_TYPELOAD (klass);
11973 /* we need to make sure that this array is exactly the type it needs
11974 * to be for correctness. the wrappers are lax with their usage
11975 * so we need to ignore them here
11977 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11978 MonoClass *array_class = mono_array_class_get (klass, 1);
11979 mini_emit_check_array_type (cfg, sp [0], array_class);
11980 CHECK_TYPELOAD (array_class);
11984 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11989 case CEE_LDELEM_I1:
11990 case CEE_LDELEM_U1:
11991 case CEE_LDELEM_I2:
11992 case CEE_LDELEM_U2:
11993 case CEE_LDELEM_I4:
11994 case CEE_LDELEM_U4:
11995 case CEE_LDELEM_I8:
11997 case CEE_LDELEM_R4:
11998 case CEE_LDELEM_R8:
11999 case CEE_LDELEM_REF: {
12005 if (*ip == CEE_LDELEM) {
12007 token = read32 (ip + 1);
12008 klass = mini_get_class (method, token, generic_context);
12009 CHECK_TYPELOAD (klass);
12010 mono_class_init (klass);
12013 klass = array_access_to_klass (*ip);
12015 if (sp [0]->type != STACK_OBJ)
12018 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12020 if (mini_is_gsharedvt_variable_klass (klass)) {
12021 // FIXME-VT: OP_ICONST optimization
12022 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12023 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12024 ins->opcode = OP_LOADV_MEMBASE;
12025 } else if (sp [1]->opcode == OP_ICONST) {
12026 int array_reg = sp [0]->dreg;
12027 int index_reg = sp [1]->dreg;
12028 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
12030 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12031 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12033 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12034 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12036 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12037 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12040 if (*ip == CEE_LDELEM)
12047 case CEE_STELEM_I1:
12048 case CEE_STELEM_I2:
12049 case CEE_STELEM_I4:
12050 case CEE_STELEM_I8:
12051 case CEE_STELEM_R4:
12052 case CEE_STELEM_R8:
12053 case CEE_STELEM_REF:
12058 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12060 if (*ip == CEE_STELEM) {
12062 token = read32 (ip + 1);
12063 klass = mini_get_class (method, token, generic_context);
12064 CHECK_TYPELOAD (klass);
12065 mono_class_init (klass);
12068 klass = array_access_to_klass (*ip);
12070 if (sp [0]->type != STACK_OBJ)
12073 emit_array_store (cfg, klass, sp, TRUE);
12075 if (*ip == CEE_STELEM)
12082 case CEE_CKFINITE: {
12086 if (cfg->llvm_only) {
12087 MonoInst *iargs [1];
12089 iargs [0] = sp [0];
12090 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12092 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12093 ins->sreg1 = sp [0]->dreg;
12094 ins->dreg = alloc_freg (cfg);
12095 ins->type = STACK_R8;
12096 MONO_ADD_INS (cfg->cbb, ins);
12098 *sp++ = mono_decompose_opcode (cfg, ins);
12104 case CEE_REFANYVAL: {
12105 MonoInst *src_var, *src;
12107 int klass_reg = alloc_preg (cfg);
12108 int dreg = alloc_preg (cfg);
12110 GSHAREDVT_FAILURE (*ip);
12113 MONO_INST_NEW (cfg, ins, *ip);
12116 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12117 CHECK_TYPELOAD (klass);
12119 context_used = mini_class_check_context_used (cfg, klass);
12122 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12124 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12125 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12128 if (context_used) {
12129 MonoInst *klass_ins;
12131 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12132 klass, MONO_RGCTX_INFO_KLASS);
12135 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12136 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12138 mini_emit_class_check (cfg, klass_reg, klass);
12140 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12141 ins->type = STACK_MP;
12142 ins->klass = klass;
12147 case CEE_MKREFANY: {
12148 MonoInst *loc, *addr;
12150 GSHAREDVT_FAILURE (*ip);
12153 MONO_INST_NEW (cfg, ins, *ip);
12156 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12157 CHECK_TYPELOAD (klass);
12159 context_used = mini_class_check_context_used (cfg, klass);
12161 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12162 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12164 if (context_used) {
12165 MonoInst *const_ins;
12166 int type_reg = alloc_preg (cfg);
12168 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12169 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12171 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12172 } else if (cfg->compile_aot) {
12173 int const_reg = alloc_preg (cfg);
12174 int type_reg = alloc_preg (cfg);
12176 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12177 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12178 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12181 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12182 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12184 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12186 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12187 ins->type = STACK_VTYPE;
12188 ins->klass = mono_defaults.typed_reference_class;
12193 case CEE_LDTOKEN: {
12195 MonoClass *handle_class;
12197 CHECK_STACK_OVF (1);
12200 n = read32 (ip + 1);
12202 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12203 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12204 handle = mono_method_get_wrapper_data (method, n);
12205 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12206 if (handle_class == mono_defaults.typehandle_class)
12207 handle = &((MonoClass*)handle)->byval_arg;
12210 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12215 mono_class_init (handle_class);
12216 if (cfg->gshared) {
12217 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12218 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12219 /* This case handles ldtoken
12220 of an open type, like for
12223 } else if (handle_class == mono_defaults.typehandle_class) {
12224 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12225 } else if (handle_class == mono_defaults.fieldhandle_class)
12226 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12227 else if (handle_class == mono_defaults.methodhandle_class)
12228 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12230 g_assert_not_reached ();
12233 if ((cfg->opt & MONO_OPT_SHARED) &&
12234 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12235 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12236 MonoInst *addr, *vtvar, *iargs [3];
12237 int method_context_used;
12239 method_context_used = mini_method_check_context_used (cfg, method);
12241 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12243 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12244 EMIT_NEW_ICONST (cfg, iargs [1], n);
12245 if (method_context_used) {
12246 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12247 method, MONO_RGCTX_INFO_METHOD);
12248 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12250 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12251 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12253 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12257 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12259 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12260 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12261 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12262 (cmethod->klass == mono_defaults.systemtype_class) &&
12263 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12264 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12266 mono_class_init (tclass);
12267 if (context_used) {
12268 ins = emit_get_rgctx_klass (cfg, context_used,
12269 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12270 } else if (cfg->compile_aot) {
12271 if (method->wrapper_type) {
12272 mono_error_init (&error); //got to do it since there are multiple conditionals below
12273 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12274 /* Special case for static synchronized wrappers */
12275 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12277 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12278 /* FIXME: n is not a normal token */
12280 EMIT_NEW_PCONST (cfg, ins, NULL);
12283 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12286 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12288 EMIT_NEW_PCONST (cfg, ins, rt);
12290 ins->type = STACK_OBJ;
12291 ins->klass = cmethod->klass;
12294 MonoInst *addr, *vtvar;
12296 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12298 if (context_used) {
12299 if (handle_class == mono_defaults.typehandle_class) {
12300 ins = emit_get_rgctx_klass (cfg, context_used,
12301 mono_class_from_mono_type ((MonoType *)handle),
12302 MONO_RGCTX_INFO_TYPE);
12303 } else if (handle_class == mono_defaults.methodhandle_class) {
12304 ins = emit_get_rgctx_method (cfg, context_used,
12305 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12306 } else if (handle_class == mono_defaults.fieldhandle_class) {
12307 ins = emit_get_rgctx_field (cfg, context_used,
12308 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12310 g_assert_not_reached ();
12312 } else if (cfg->compile_aot) {
12313 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12315 EMIT_NEW_PCONST (cfg, ins, handle);
12317 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12319 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12329 MONO_INST_NEW (cfg, ins, OP_THROW);
12331 ins->sreg1 = sp [0]->dreg;
12333 cfg->cbb->out_of_line = TRUE;
12334 MONO_ADD_INS (cfg->cbb, ins);
12335 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12336 MONO_ADD_INS (cfg->cbb, ins);
12339 link_bblock (cfg, cfg->cbb, end_bblock);
12340 start_new_bblock = 1;
12341 /* This can complicate code generation for llvm since the return value might not be defined */
12342 if (COMPILE_LLVM (cfg))
12343 INLINE_FAILURE ("throw");
12345 case CEE_ENDFINALLY:
12346 /* mono_save_seq_point_info () depends on this */
12347 if (sp != stack_start)
12348 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12349 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12350 MONO_ADD_INS (cfg->cbb, ins);
12352 start_new_bblock = 1;
12355 * Control will leave the method so empty the stack, otherwise
12356 * the next basic block will start with a nonempty stack.
12358 while (sp != stack_start) {
12363 case CEE_LEAVE_S: {
12366 if (*ip == CEE_LEAVE) {
12368 target = ip + 5 + (gint32)read32(ip + 1);
12371 target = ip + 2 + (signed char)(ip [1]);
12374 /* empty the stack */
12375 while (sp != stack_start) {
12380 * If this leave statement is in a catch block, check for a
12381 * pending exception, and rethrow it if necessary.
12382 * We avoid doing this in runtime invoke wrappers, since those are called
12383 * by native code which expects the wrapper to catch all exceptions.
12385 for (i = 0; i < header->num_clauses; ++i) {
12386 MonoExceptionClause *clause = &header->clauses [i];
12389 * Use <= in the final comparison to handle clauses with multiple
12390 * leave statements, like in bug #78024.
12391 * The ordering of the exception clauses guarantees that we find the
12392 * innermost clause.
12394 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12396 MonoBasicBlock *dont_throw;
12401 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12404 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12406 NEW_BBLOCK (cfg, dont_throw);
12409 * Currently, we always rethrow the abort exception, despite the
12410 * fact that this is not correct. See thread6.cs for an example.
12411 * But propagating the abort exception is more important than
12412 * getting the semantics right.
12414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12415 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12416 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12418 MONO_START_BB (cfg, dont_throw);
12423 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12426 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12428 MonoExceptionClause *clause;
12430 for (tmp = handlers; tmp; tmp = tmp->next) {
12431 clause = (MonoExceptionClause *)tmp->data;
12432 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12434 link_bblock (cfg, cfg->cbb, tblock);
12435 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12436 ins->inst_target_bb = tblock;
12437 ins->inst_eh_block = clause;
12438 MONO_ADD_INS (cfg->cbb, ins);
12439 cfg->cbb->has_call_handler = 1;
12440 if (COMPILE_LLVM (cfg)) {
12441 MonoBasicBlock *target_bb;
12444 * Link the finally bblock with the target, since it will
12445 * conceptually branch there.
12447 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12448 GET_BBLOCK (cfg, target_bb, target);
12449 link_bblock (cfg, tblock, target_bb);
12452 g_list_free (handlers);
12455 MONO_INST_NEW (cfg, ins, OP_BR);
12456 MONO_ADD_INS (cfg->cbb, ins);
12457 GET_BBLOCK (cfg, tblock, target);
12458 link_bblock (cfg, cfg->cbb, tblock);
12459 ins->inst_target_bb = tblock;
12461 start_new_bblock = 1;
12463 if (*ip == CEE_LEAVE)
12472 * Mono specific opcodes
12474 case MONO_CUSTOM_PREFIX: {
12476 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12480 case CEE_MONO_ICALL: {
12482 MonoJitICallInfo *info;
12484 token = read32 (ip + 2);
12485 func = mono_method_get_wrapper_data (method, token);
12486 info = mono_find_jit_icall_by_addr (func);
12488 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12491 CHECK_STACK (info->sig->param_count);
12492 sp -= info->sig->param_count;
12494 ins = mono_emit_jit_icall (cfg, info->func, sp);
12495 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12499 inline_costs += 10 * num_calls++;
12503 case CEE_MONO_LDPTR_CARD_TABLE:
12504 case CEE_MONO_LDPTR_NURSERY_START:
12505 case CEE_MONO_LDPTR_NURSERY_BITS:
12506 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12507 CHECK_STACK_OVF (1);
12510 case CEE_MONO_LDPTR_CARD_TABLE:
12511 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12513 case CEE_MONO_LDPTR_NURSERY_START:
12514 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12516 case CEE_MONO_LDPTR_NURSERY_BITS:
12517 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12519 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12520 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12526 inline_costs += 10 * num_calls++;
12529 case CEE_MONO_LDPTR: {
12532 CHECK_STACK_OVF (1);
12534 token = read32 (ip + 2);
12536 ptr = mono_method_get_wrapper_data (method, token);
12537 EMIT_NEW_PCONST (cfg, ins, ptr);
12540 inline_costs += 10 * num_calls++;
12541 /* Can't embed random pointers into AOT code */
12545 case CEE_MONO_JIT_ICALL_ADDR: {
12546 MonoJitICallInfo *callinfo;
12549 CHECK_STACK_OVF (1);
12551 token = read32 (ip + 2);
12553 ptr = mono_method_get_wrapper_data (method, token);
12554 callinfo = mono_find_jit_icall_by_addr (ptr);
12555 g_assert (callinfo);
12556 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12559 inline_costs += 10 * num_calls++;
12562 case CEE_MONO_ICALL_ADDR: {
12563 MonoMethod *cmethod;
12566 CHECK_STACK_OVF (1);
12568 token = read32 (ip + 2);
12570 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12572 if (cfg->compile_aot) {
12573 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12575 ptr = mono_lookup_internal_call (cmethod);
12577 EMIT_NEW_PCONST (cfg, ins, ptr);
12583 case CEE_MONO_VTADDR: {
12584 MonoInst *src_var, *src;
12590 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12591 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12596 case CEE_MONO_NEWOBJ: {
12597 MonoInst *iargs [2];
12599 CHECK_STACK_OVF (1);
12601 token = read32 (ip + 2);
12602 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12603 mono_class_init (klass);
12604 NEW_DOMAINCONST (cfg, iargs [0]);
12605 MONO_ADD_INS (cfg->cbb, iargs [0]);
12606 NEW_CLASSCONST (cfg, iargs [1], klass);
12607 MONO_ADD_INS (cfg->cbb, iargs [1]);
12608 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12610 inline_costs += 10 * num_calls++;
12613 case CEE_MONO_OBJADDR:
12616 MONO_INST_NEW (cfg, ins, OP_MOVE);
12617 ins->dreg = alloc_ireg_mp (cfg);
12618 ins->sreg1 = sp [0]->dreg;
12619 ins->type = STACK_MP;
12620 MONO_ADD_INS (cfg->cbb, ins);
12624 case CEE_MONO_LDNATIVEOBJ:
12626 * Similar to LDOBJ, but instead load the unmanaged
12627 * representation of the vtype to the stack.
12632 token = read32 (ip + 2);
12633 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12634 g_assert (klass->valuetype);
12635 mono_class_init (klass);
12638 MonoInst *src, *dest, *temp;
12641 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12642 temp->backend.is_pinvoke = 1;
12643 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12644 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12646 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12647 dest->type = STACK_VTYPE;
12648 dest->klass = klass;
12654 case CEE_MONO_RETOBJ: {
12656 * Same as RET, but return the native representation of a vtype
12659 g_assert (cfg->ret);
12660 g_assert (mono_method_signature (method)->pinvoke);
12665 token = read32 (ip + 2);
12666 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12668 if (!cfg->vret_addr) {
12669 g_assert (cfg->ret_var_is_local);
12671 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12673 EMIT_NEW_RETLOADA (cfg, ins);
12675 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12677 if (sp != stack_start)
12680 MONO_INST_NEW (cfg, ins, OP_BR);
12681 ins->inst_target_bb = end_bblock;
12682 MONO_ADD_INS (cfg->cbb, ins);
12683 link_bblock (cfg, cfg->cbb, end_bblock);
12684 start_new_bblock = 1;
12688 case CEE_MONO_CISINST:
12689 case CEE_MONO_CCASTCLASS: {
12694 token = read32 (ip + 2);
12695 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12696 if (ip [1] == CEE_MONO_CISINST)
12697 ins = handle_cisinst (cfg, klass, sp [0]);
12699 ins = handle_ccastclass (cfg, klass, sp [0]);
12704 case CEE_MONO_SAVE_LMF:
12705 case CEE_MONO_RESTORE_LMF:
12708 case CEE_MONO_CLASSCONST:
12709 CHECK_STACK_OVF (1);
12711 token = read32 (ip + 2);
12712 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12715 inline_costs += 10 * num_calls++;
12717 case CEE_MONO_NOT_TAKEN:
12718 cfg->cbb->out_of_line = TRUE;
12721 case CEE_MONO_TLS: {
12724 CHECK_STACK_OVF (1);
12726 key = (MonoTlsKey)read32 (ip + 2);
12727 g_assert (key < TLS_KEY_NUM);
12729 ins = mono_create_tls_get (cfg, key);
12731 if (cfg->compile_aot) {
12733 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12734 ins->dreg = alloc_preg (cfg);
12735 ins->type = STACK_PTR;
12737 g_assert_not_reached ();
12740 ins->type = STACK_PTR;
12741 MONO_ADD_INS (cfg->cbb, ins);
12746 case CEE_MONO_DYN_CALL: {
12747 MonoCallInst *call;
12749 /* It would be easier to call a trampoline, but that would put an
12750 * extra frame on the stack, confusing exception handling. So
12751 * implement it inline using an opcode for now.
12754 if (!cfg->dyn_call_var) {
12755 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12756 /* prevent it from being register allocated */
12757 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12760 /* Has to use a call inst since the local regalloc expects it */
12761 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12762 ins = (MonoInst*)call;
12764 ins->sreg1 = sp [0]->dreg;
12765 ins->sreg2 = sp [1]->dreg;
12766 MONO_ADD_INS (cfg->cbb, ins);
12768 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12771 inline_costs += 10 * num_calls++;
12775 case CEE_MONO_MEMORY_BARRIER: {
12777 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12781 case CEE_MONO_JIT_ATTACH: {
12782 MonoInst *args [16], *domain_ins;
12783 MonoInst *ad_ins, *jit_tls_ins;
12784 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12786 g_assert (!mono_threads_is_coop_enabled ());
12788 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12790 EMIT_NEW_PCONST (cfg, ins, NULL);
12791 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12793 ad_ins = mono_get_domain_intrinsic (cfg);
12794 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12796 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12797 NEW_BBLOCK (cfg, next_bb);
12798 NEW_BBLOCK (cfg, call_bb);
12800 if (cfg->compile_aot) {
12801 /* AOT code is only used in the root domain */
12802 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12804 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12806 MONO_ADD_INS (cfg->cbb, ad_ins);
12807 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12810 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12811 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12812 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12815 MONO_START_BB (cfg, call_bb);
12818 /* AOT code is only used in the root domain */
12819 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12820 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12821 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12824 MONO_START_BB (cfg, next_bb);
12830 case CEE_MONO_JIT_DETACH: {
12831 MonoInst *args [16];
12833 /* Restore the original domain */
12834 dreg = alloc_ireg (cfg);
12835 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12836 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12840 case CEE_MONO_CALLI_EXTRA_ARG: {
12842 MonoMethodSignature *fsig;
12846 * This is the same as CEE_CALLI, but passes an additional argument
12847 * to the called method in llvmonly mode.
12848 * This is only used by delegate invoke wrappers to call the
12849 * actual delegate method.
12851 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12854 token = read32 (ip + 2);
12862 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12865 if (cfg->llvm_only)
12866 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12868 n = fsig->param_count + fsig->hasthis + 1;
12875 if (cfg->llvm_only) {
12877 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12878 * cconv. This is set by mono_init_delegate ().
12880 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12881 MonoInst *callee = addr;
12882 MonoInst *call, *localloc_ins;
12883 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12884 int low_bit_reg = alloc_preg (cfg);
12886 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12887 NEW_BBLOCK (cfg, end_bb);
12889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12891 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12893 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12894 addr = emit_get_rgctx_sig (cfg, context_used,
12895 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12897 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12899 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12900 ins->dreg = alloc_preg (cfg);
12901 ins->inst_imm = 2 * SIZEOF_VOID_P;
12902 MONO_ADD_INS (cfg->cbb, ins);
12903 localloc_ins = ins;
12904 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12905 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12908 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12911 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12912 MONO_START_BB (cfg, is_gsharedvt_bb);
12913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12914 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12915 ins->dreg = call->dreg;
12917 MONO_START_BB (cfg, end_bb);
12919 /* Caller uses a normal calling conv */
12921 MonoInst *callee = addr;
12922 MonoInst *call, *localloc_ins;
12923 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12924 int low_bit_reg = alloc_preg (cfg);
12926 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12927 NEW_BBLOCK (cfg, end_bb);
12929 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12930 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12931 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12933 /* Normal case: callee uses a normal cconv, no conversion is needed */
12934 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12935 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12936 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12937 MONO_START_BB (cfg, is_gsharedvt_bb);
12938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12939 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12940 MONO_ADD_INS (cfg->cbb, addr);
12942 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12944 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12945 ins->dreg = alloc_preg (cfg);
12946 ins->inst_imm = 2 * SIZEOF_VOID_P;
12947 MONO_ADD_INS (cfg->cbb, ins);
12948 localloc_ins = ins;
12949 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12953 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12954 ins->dreg = call->dreg;
12955 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12957 MONO_START_BB (cfg, end_bb);
12960 /* Same as CEE_CALLI */
12961 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12963 * We pass the address to the gsharedvt trampoline in the rgctx reg
12965 MonoInst *callee = addr;
12967 addr = emit_get_rgctx_sig (cfg, context_used,
12968 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12969 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12971 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12975 if (!MONO_TYPE_IS_VOID (fsig->ret))
12976 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12978 CHECK_CFG_EXCEPTION;
12982 constrained_class = NULL;
12985 case CEE_MONO_LDDOMAIN:
12986 CHECK_STACK_OVF (1);
12987 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12992 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12998 case CEE_PREFIX1: {
13001 case CEE_ARGLIST: {
13002 /* somewhat similar to LDTOKEN */
13003 MonoInst *addr, *vtvar;
13004 CHECK_STACK_OVF (1);
13005 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
13007 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
13008 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
13010 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
13011 ins->type = STACK_VTYPE;
13012 ins->klass = mono_defaults.argumenthandle_class;
13022 MonoInst *cmp, *arg1, *arg2;
13030 * The following transforms:
13031 * CEE_CEQ into OP_CEQ
13032 * CEE_CGT into OP_CGT
13033 * CEE_CGT_UN into OP_CGT_UN
13034 * CEE_CLT into OP_CLT
13035 * CEE_CLT_UN into OP_CLT_UN
13037 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13039 MONO_INST_NEW (cfg, ins, cmp->opcode);
13040 cmp->sreg1 = arg1->dreg;
13041 cmp->sreg2 = arg2->dreg;
13042 type_from_op (cfg, cmp, arg1, arg2);
13044 add_widen_op (cfg, cmp, &arg1, &arg2);
13045 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13046 cmp->opcode = OP_LCOMPARE;
13047 else if (arg1->type == STACK_R4)
13048 cmp->opcode = OP_RCOMPARE;
13049 else if (arg1->type == STACK_R8)
13050 cmp->opcode = OP_FCOMPARE;
13052 cmp->opcode = OP_ICOMPARE;
13053 MONO_ADD_INS (cfg->cbb, cmp);
13054 ins->type = STACK_I4;
13055 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13056 type_from_op (cfg, ins, arg1, arg2);
13058 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13060 * The backends expect the fceq opcodes to do the
13063 ins->sreg1 = cmp->sreg1;
13064 ins->sreg2 = cmp->sreg2;
13067 MONO_ADD_INS (cfg->cbb, ins);
13073 MonoInst *argconst;
13074 MonoMethod *cil_method;
13076 CHECK_STACK_OVF (1);
13078 n = read32 (ip + 2);
13079 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13082 mono_class_init (cmethod->klass);
13084 mono_save_token_info (cfg, image, n, cmethod);
13086 context_used = mini_method_check_context_used (cfg, cmethod);
13088 cil_method = cmethod;
13089 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13090 emit_method_access_failure (cfg, method, cil_method);
13092 if (mono_security_core_clr_enabled ())
13093 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13096 * Optimize the common case of ldftn+delegate creation
13098 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13099 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13100 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13101 MonoInst *target_ins, *handle_ins;
13102 MonoMethod *invoke;
13103 int invoke_context_used;
13105 invoke = mono_get_delegate_invoke (ctor_method->klass);
13106 if (!invoke || !mono_method_signature (invoke))
13109 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13111 target_ins = sp [-1];
13113 if (mono_security_core_clr_enabled ())
13114 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13116 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13117 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13118 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13120 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13124 /* FIXME: SGEN support */
13125 if (invoke_context_used == 0 || cfg->llvm_only) {
13127 if (cfg->verbose_level > 3)
13128 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13129 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13132 CHECK_CFG_EXCEPTION;
13142 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13143 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13147 inline_costs += 10 * num_calls++;
13150 case CEE_LDVIRTFTN: {
13151 MonoInst *args [2];
13155 n = read32 (ip + 2);
13156 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13159 mono_class_init (cmethod->klass);
13161 context_used = mini_method_check_context_used (cfg, cmethod);
13163 if (mono_security_core_clr_enabled ())
13164 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13167 * Optimize the common case of ldvirtftn+delegate creation
13169 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13170 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13171 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13172 MonoInst *target_ins, *handle_ins;
13173 MonoMethod *invoke;
13174 int invoke_context_used;
13175 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13177 invoke = mono_get_delegate_invoke (ctor_method->klass);
13178 if (!invoke || !mono_method_signature (invoke))
13181 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13183 target_ins = sp [-1];
13185 if (mono_security_core_clr_enabled ())
13186 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13188 /* FIXME: SGEN support */
13189 if (invoke_context_used == 0 || cfg->llvm_only) {
13191 if (cfg->verbose_level > 3)
13192 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13193 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13196 CHECK_CFG_EXCEPTION;
13209 args [1] = emit_get_rgctx_method (cfg, context_used,
13210 cmethod, MONO_RGCTX_INFO_METHOD);
13213 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13215 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13218 inline_costs += 10 * num_calls++;
13222 CHECK_STACK_OVF (1);
13224 n = read16 (ip + 2);
13226 EMIT_NEW_ARGLOAD (cfg, ins, n);
13231 CHECK_STACK_OVF (1);
13233 n = read16 (ip + 2);
13235 NEW_ARGLOADA (cfg, ins, n);
13236 MONO_ADD_INS (cfg->cbb, ins);
13244 n = read16 (ip + 2);
13246 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13248 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13252 CHECK_STACK_OVF (1);
13254 n = read16 (ip + 2);
13256 EMIT_NEW_LOCLOAD (cfg, ins, n);
13261 unsigned char *tmp_ip;
13262 CHECK_STACK_OVF (1);
13264 n = read16 (ip + 2);
13267 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13273 EMIT_NEW_LOCLOADA (cfg, ins, n);
13282 n = read16 (ip + 2);
13284 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13286 emit_stloc_ir (cfg, sp, header, n);
13293 if (sp != stack_start)
13295 if (cfg->method != method)
13297 * Inlining this into a loop in a parent could lead to
13298 * stack overflows which is different behavior than the
13299 * non-inlined case, thus disable inlining in this case.
13301 INLINE_FAILURE("localloc");
13303 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13304 ins->dreg = alloc_preg (cfg);
13305 ins->sreg1 = sp [0]->dreg;
13306 ins->type = STACK_PTR;
13307 MONO_ADD_INS (cfg->cbb, ins);
13309 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13311 ins->flags |= MONO_INST_INIT;
13316 case CEE_ENDFILTER: {
13317 MonoExceptionClause *clause, *nearest;
13322 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13324 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13325 ins->sreg1 = (*sp)->dreg;
13326 MONO_ADD_INS (cfg->cbb, ins);
13327 start_new_bblock = 1;
13331 for (cc = 0; cc < header->num_clauses; ++cc) {
13332 clause = &header->clauses [cc];
13333 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13334 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13335 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13338 g_assert (nearest);
13339 if ((ip - header->code) != nearest->handler_offset)
13344 case CEE_UNALIGNED_:
13345 ins_flag |= MONO_INST_UNALIGNED;
13346 /* FIXME: record alignment? we can assume 1 for now */
13350 case CEE_VOLATILE_:
13351 ins_flag |= MONO_INST_VOLATILE;
13355 ins_flag |= MONO_INST_TAILCALL;
13356 cfg->flags |= MONO_CFG_HAS_TAIL;
13357 /* Can't inline tail calls at this time */
13358 inline_costs += 100000;
13365 token = read32 (ip + 2);
13366 klass = mini_get_class (method, token, generic_context);
13367 CHECK_TYPELOAD (klass);
13368 if (generic_class_is_reference_type (cfg, klass))
13369 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13371 mini_emit_initobj (cfg, *sp, NULL, klass);
13375 case CEE_CONSTRAINED_:
13377 token = read32 (ip + 2);
13378 constrained_class = mini_get_class (method, token, generic_context);
13379 CHECK_TYPELOAD (constrained_class);
13383 case CEE_INITBLK: {
13384 MonoInst *iargs [3];
13388 /* Skip optimized paths for volatile operations. */
13389 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13390 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13391 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13392 /* emit_memset only works when val == 0 */
13393 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13396 iargs [0] = sp [0];
13397 iargs [1] = sp [1];
13398 iargs [2] = sp [2];
13399 if (ip [1] == CEE_CPBLK) {
13401 * FIXME: It's unclear whether we should be emitting both the acquire
13402 * and release barriers for cpblk. It is technically both a load and
13403 * store operation, so it seems like that's the sensible thing to do.
13405 * FIXME: We emit full barriers on both sides of the operation for
13406 * simplicity. We should have a separate atomic memcpy method instead.
13408 MonoMethod *memcpy_method = get_memcpy_method ();
13410 if (ins_flag & MONO_INST_VOLATILE)
13411 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13413 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13414 call->flags |= ins_flag;
13416 if (ins_flag & MONO_INST_VOLATILE)
13417 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13419 MonoMethod *memset_method = get_memset_method ();
13420 if (ins_flag & MONO_INST_VOLATILE) {
13421 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13422 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13424 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13425 call->flags |= ins_flag;
13436 ins_flag |= MONO_INST_NOTYPECHECK;
13438 ins_flag |= MONO_INST_NORANGECHECK;
13439 /* we ignore the no-nullcheck for now since we
13440 * really do it explicitly only when doing callvirt->call
13444 case CEE_RETHROW: {
13446 int handler_offset = -1;
13448 for (i = 0; i < header->num_clauses; ++i) {
13449 MonoExceptionClause *clause = &header->clauses [i];
13450 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13451 handler_offset = clause->handler_offset;
13456 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13458 if (handler_offset == -1)
13461 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13462 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13463 ins->sreg1 = load->dreg;
13464 MONO_ADD_INS (cfg->cbb, ins);
13466 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13467 MONO_ADD_INS (cfg->cbb, ins);
13470 link_bblock (cfg, cfg->cbb, end_bblock);
13471 start_new_bblock = 1;
13479 CHECK_STACK_OVF (1);
13481 token = read32 (ip + 2);
13482 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13483 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13486 val = mono_type_size (type, &ialign);
13488 MonoClass *klass = mini_get_class (method, token, generic_context);
13489 CHECK_TYPELOAD (klass);
13491 val = mono_type_size (&klass->byval_arg, &ialign);
13493 if (mini_is_gsharedvt_klass (klass))
13494 GSHAREDVT_FAILURE (*ip);
13496 EMIT_NEW_ICONST (cfg, ins, val);
13501 case CEE_REFANYTYPE: {
13502 MonoInst *src_var, *src;
13504 GSHAREDVT_FAILURE (*ip);
13510 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13512 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13513 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13514 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13519 case CEE_READONLY_:
13532 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13542 g_warning ("opcode 0x%02x not handled", *ip);
13546 if (start_new_bblock != 1)
13549 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13550 if (cfg->cbb->next_bb) {
13551 /* This could already be set because of inlining, #693905 */
13552 MonoBasicBlock *bb = cfg->cbb;
13554 while (bb->next_bb)
13556 bb->next_bb = end_bblock;
13558 cfg->cbb->next_bb = end_bblock;
13561 if (cfg->method == method && cfg->domainvar) {
13563 MonoInst *get_domain;
13565 cfg->cbb = init_localsbb;
13567 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13568 MONO_ADD_INS (cfg->cbb, get_domain);
13570 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13572 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13573 MONO_ADD_INS (cfg->cbb, store);
13576 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13577 if (cfg->compile_aot)
13578 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13579 mono_get_got_var (cfg);
13582 if (cfg->method == method && cfg->got_var)
13583 mono_emit_load_got_addr (cfg);
13585 if (init_localsbb) {
13586 cfg->cbb = init_localsbb;
13588 for (i = 0; i < header->num_locals; ++i) {
13589 emit_init_local (cfg, i, header->locals [i], init_locals);
13593 if (cfg->init_ref_vars && cfg->method == method) {
13594 /* Emit initialization for ref vars */
13595 // FIXME: Avoid duplication initialization for IL locals.
13596 for (i = 0; i < cfg->num_varinfo; ++i) {
13597 MonoInst *ins = cfg->varinfo [i];
13599 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13600 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13604 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13605 cfg->cbb = init_localsbb;
13606 emit_push_lmf (cfg);
13609 cfg->cbb = init_localsbb;
13610 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13613 MonoBasicBlock *bb;
13616 * Make seq points at backward branch targets interruptable.
13618 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13619 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13620 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13623 /* Add a sequence point for method entry/exit events */
13624 if (seq_points && cfg->gen_sdb_seq_points) {
13625 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13626 MONO_ADD_INS (init_localsbb, ins);
13627 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13628 MONO_ADD_INS (cfg->bb_exit, ins);
13632 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13633 * the code they refer to was dead (#11880).
13635 if (sym_seq_points) {
13636 for (i = 0; i < header->code_size; ++i) {
13637 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13640 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13641 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13648 if (cfg->method == method) {
13649 MonoBasicBlock *bb;
13650 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13651 bb->region = mono_find_block_region (cfg, bb->real_offset);
13653 mono_create_spvar_for_region (cfg, bb->region);
13654 if (cfg->verbose_level > 2)
13655 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13658 MonoBasicBlock *bb;
13659 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13660 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13661 bb->real_offset = inline_offset;
13665 if (inline_costs < 0) {
13668 /* Method is too large */
13669 mname = mono_method_full_name (method, TRUE);
13670 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13674 if ((cfg->verbose_level > 2) && (cfg->method == method))
13675 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13680 g_assert (!mono_error_ok (&cfg->error));
13684 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13688 set_exception_type_from_invalid_il (cfg, method, ip);
13692 g_slist_free (class_inits);
13693 mono_basic_block_free (original_bb);
13694 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13695 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13696 if (cfg->exception_type)
13699 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode (OP_STORE*_MEMBASE_REG) to the
 * matching immediate-source form (OP_STORE*_MEMBASE_IMM), used when a store's
 * source is a known constant so the load of the constant can be folded away.
 * NOTE(review): this view is an excerpt -- the switch header and closing lines
 * are elided; only the case arms are visible.
 */
13703 store_membase_reg_to_store_membase_imm (int opcode)
13706 case OP_STORE_MEMBASE_REG:
13707 return OP_STORE_MEMBASE_IMM;
13708 case OP_STOREI1_MEMBASE_REG:
13709 return OP_STOREI1_MEMBASE_IMM;
13710 case OP_STOREI2_MEMBASE_REG:
13711 return OP_STOREI2_MEMBASE_IMM;
13712 case OP_STOREI4_MEMBASE_REG:
13713 return OP_STOREI4_MEMBASE_IMM;
13714 case OP_STOREI8_MEMBASE_REG:
13715 return OP_STOREI8_MEMBASE_IMM;
/* No fallback visible here: an opcode without an immediate form aborts. */
13717 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-operand opcode to its immediate-operand (_IMM) variant, so a
 * constant second source can be encoded directly into the instruction.
 * Covers integer/long ALU ops, compares, membase stores, and a few
 * x86/amd64-specific opcodes. NOTE(review): the case labels are elided in
 * this view; only the return arms and #if guards are visible, and the
 * behavior for unmapped opcodes (presumably returning -1) is not shown here.
 */
13724 mono_op_to_op_imm (int opcode)
13728 return OP_IADD_IMM;
13730 return OP_ISUB_IMM;
13732 return OP_IDIV_IMM;
13734 return OP_IDIV_UN_IMM;
13736 return OP_IREM_IMM;
13738 return OP_IREM_UN_IMM;
13740 return OP_IMUL_IMM;
13742 return OP_IAND_IMM;
13746 return OP_IXOR_IMM;
13748 return OP_ISHL_IMM;
13750 return OP_ISHR_IMM;
13752 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU variants. */
13755 return OP_LADD_IMM;
13757 return OP_LSUB_IMM;
13759 return OP_LAND_IMM;
13763 return OP_LXOR_IMM;
13765 return OP_LSHL_IMM;
13767 return OP_LSHR_IMM;
13769 return OP_LSHR_UN_IMM;
/* LREM with an immediate only maps directly on 64-bit registers. */
13770 #if SIZEOF_REGISTER == 8
13772 return OP_LREM_IMM;
13776 return OP_COMPARE_IMM;
13778 return OP_ICOMPARE_IMM;
13780 return OP_LCOMPARE_IMM;
/* Membase stores: fold the constant source into the store itself. */
13782 case OP_STORE_MEMBASE_REG:
13783 return OP_STORE_MEMBASE_IMM;
13784 case OP_STOREI1_MEMBASE_REG:
13785 return OP_STOREI1_MEMBASE_IMM;
13786 case OP_STOREI2_MEMBASE_REG:
13787 return OP_STOREI2_MEMBASE_IMM;
13788 case OP_STOREI4_MEMBASE_REG:
13789 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes with immediate encodings. */
13791 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13793 return OP_X86_PUSH_IMM;
13794 case OP_X86_COMPARE_MEMBASE_REG:
13795 return OP_X86_COMPARE_MEMBASE_IMM;
13797 #if defined(TARGET_AMD64)
13798 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13799 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13801 case OP_VOIDCALL_REG:
13802 return OP_VOIDCALL;
13810 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding typed
 * membase load IR opcode (OP_LOAD*_MEMBASE). Note CEE_LDIND_REF is visible
 * and maps to the pointer-sized OP_LOAD_MEMBASE; the other case labels are
 * elided in this view.
 */
13817 ldind_to_load_membase (int opcode)
13821 return OP_LOADI1_MEMBASE;
13823 return OP_LOADU1_MEMBASE;
13825 return OP_LOADI2_MEMBASE;
13827 return OP_LOADU2_MEMBASE;
13829 return OP_LOADI4_MEMBASE;
13831 return OP_LOADU4_MEMBASE;
13833 return OP_LOAD_MEMBASE;
13834 case CEE_LDIND_REF:
13835 return OP_LOAD_MEMBASE;
13837 return OP_LOADI8_MEMBASE;
13839 return OP_LOADR4_MEMBASE;
13841 return OP_LOADR8_MEMBASE;
/* Any other opcode is a caller error: abort. */
13843 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding typed
 * register-source membase store IR opcode (OP_STORE*_MEMBASE_REG).
 * CEE_STIND_REF maps to the pointer-sized OP_STORE_MEMBASE_REG; the other
 * case labels are elided in this view.
 */
13850 stind_to_store_membase (int opcode)
13854 return OP_STOREI1_MEMBASE_REG;
13856 return OP_STOREI2_MEMBASE_REG;
13858 return OP_STOREI4_MEMBASE_REG;
13860 case CEE_STIND_REF:
13861 return OP_STORE_MEMBASE_REG;
13863 return OP_STOREI8_MEMBASE_REG;
13865 return OP_STORER4_MEMBASE_REG;
13867 return OP_STORER8_MEMBASE_REG;
/* Any other opcode is a caller error: abort. */
13869 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address _MEM
 * form. Only x86/amd64 provide these encodings, hence the target guard;
 * OP_LOADI8_MEM additionally requires 64-bit registers. NOTE(review): the
 * switch header and the behavior for other targets/opcodes (presumably
 * returning -1) are elided in this view.
 */
13876 mono_load_membase_to_load_mem (int opcode)
13878 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13879 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13881 case OP_LOAD_MEMBASE:
13882 return OP_LOAD_MEM;
13883 case OP_LOADU1_MEMBASE:
13884 return OP_LOADU1_MEM;
13885 case OP_LOADU2_MEMBASE:
13886 return OP_LOADU2_MEM;
13887 case OP_LOADI4_MEMBASE:
13888 return OP_LOADI4_MEM;
13889 case OP_LOADU4_MEMBASE:
13890 return OP_LOADU4_MEM;
13891 #if SIZEOF_REGISTER == 8
13892 case OP_LOADI8_MEMBASE:
13893 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is a stack variable stored with
 * STORE_OPCODE, return the x86/amd64 read-modify-write "<op> membase" opcode
 * that fuses the load+ALU+store into a single instruction. Callers compare
 * the result against -1 to decide whether the fusion is possible (see the
 * use in mono_spill_global_vars). NOTE(review): the case labels and the
 * fallthrough return are elided in this view.
 */
13902 op_to_op_dest_membase (int store_opcode, int opcode)
13904 #if defined(TARGET_X86)
/* x86 RMW forms only exist for 32-bit/pointer-sized destinations. */
13905 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13910 return OP_X86_ADD_MEMBASE_REG;
13912 return OP_X86_SUB_MEMBASE_REG;
13914 return OP_X86_AND_MEMBASE_REG;
13916 return OP_X86_OR_MEMBASE_REG;
13918 return OP_X86_XOR_MEMBASE_REG;
13921 return OP_X86_ADD_MEMBASE_IMM;
13924 return OP_X86_SUB_MEMBASE_IMM;
13927 return OP_X86_AND_MEMBASE_IMM;
13930 return OP_X86_OR_MEMBASE_IMM;
13933 return OP_X86_XOR_MEMBASE_IMM;
13939 #if defined(TARGET_AMD64)
/* amd64 additionally supports 64-bit destinations (OP_STOREI8...). */
13940 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13945 return OP_X86_ADD_MEMBASE_REG;
13947 return OP_X86_SUB_MEMBASE_REG;
13949 return OP_X86_AND_MEMBASE_REG;
13951 return OP_X86_OR_MEMBASE_REG;
13953 return OP_X86_XOR_MEMBASE_REG;
13955 return OP_X86_ADD_MEMBASE_IMM;
13957 return OP_X86_SUB_MEMBASE_IMM;
13959 return OP_X86_AND_MEMBASE_IMM;
13961 return OP_X86_OR_MEMBASE_IMM;
13963 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (AMD64_*) RMW variants. */
13965 return OP_AMD64_ADD_MEMBASE_REG;
13967 return OP_AMD64_SUB_MEMBASE_REG;
13969 return OP_AMD64_AND_MEMBASE_REG;
13971 return OP_AMD64_OR_MEMBASE_REG;
13973 return OP_AMD64_XOR_MEMBASE_REG;
13976 return OP_AMD64_ADD_MEMBASE_IMM;
13979 return OP_AMD64_SUB_MEMBASE_IMM;
13982 return OP_AMD64_AND_MEMBASE_IMM;
13985 return OP_AMD64_OR_MEMBASE_IMM;
13988 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-result opcode with the following byte store into a
 * single x86/amd64 SETcc-to-memory instruction. Callers compare the result
 * against -1 to decide whether the fusion applies (see the use in
 * mono_spill_global_vars). NOTE(review): the case labels selecting which
 * condition opcodes hit the SETEQ/SETNE arms are elided in this view; the
 * fusion only triggers for single-byte stores (OP_STOREI1_MEMBASE_REG).
 */
13998 op_to_op_store_membase (int store_opcode, int opcode)
14000 #if defined(TARGET_X86) || defined(TARGET_AMD64)
14003 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14004 return OP_X86_SETEQ_MEMBASE;
14006 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14007 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load into the first source operand of OPCODE, producing an
 * x86/amd64 "<op> reg, [membase]"-style opcode, given the LOAD_OPCODE used
 * for the variable. On amd64 the choice between 32-bit (ICOMPARE) and 64-bit
 * (COMPARE) membase forms depends on the load width and on ilp32 mode
 * (cfg->backend->ilp32). NOTE(review): case labels, #if boundaries and the
 * -1 fallthrough are partly elided in this view.
 */
14015 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
14018 /* FIXME: This has sign extension issues */
14020 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14021 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be folded. */
14024 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14029 return OP_X86_PUSH_MEMBASE;
14030 case OP_COMPARE_IMM:
14031 case OP_ICOMPARE_IMM:
14032 return OP_X86_COMPARE_MEMBASE_IMM;
14035 return OP_X86_COMPARE_MEMBASE_REG;
14039 #ifdef TARGET_AMD64
14040 /* FIXME: This has sign extension issues */
14042 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14043 return OP_X86_COMPARE_MEMBASE8_IMM;
14048 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14049 return OP_X86_PUSH_MEMBASE;
14051 /* FIXME: This only works for 32 bit immediates
14052 case OP_COMPARE_IMM:
14053 case OP_LCOMPARE_IMM:
14054 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14055 return OP_AMD64_COMPARE_MEMBASE_IMM;
14057 case OP_ICOMPARE_IMM:
14058 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14059 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pointer-sized compare: 32-bit form under ilp32, 64-bit otherwise. */
14063 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14064 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14065 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14066 return OP_AMD64_COMPARE_MEMBASE_REG;
14069 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14070 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load into the second source operand of OPCODE, producing an
 * x86/amd64 "<op> reg, [membase]"-style opcode, given the LOAD_OPCODE used
 * for the variable. On amd64, 32-bit-wide loads (and pointer loads under
 * ilp32) select the X86_*/ICOMPARE forms, while 64-bit loads select the
 * AMD64_* forms. NOTE(review): case labels and the -1 fallthrough are
 * elided in this view.
 */
14079 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be folded. */
14082 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14088 return OP_X86_COMPARE_REG_MEMBASE;
14090 return OP_X86_ADD_REG_MEMBASE;
14092 return OP_X86_SUB_REG_MEMBASE;
14094 return OP_X86_AND_REG_MEMBASE;
14096 return OP_X86_OR_REG_MEMBASE;
14098 return OP_X86_XOR_REG_MEMBASE;
14102 #ifdef TARGET_AMD64
14103 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14106 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14108 return OP_X86_ADD_REG_MEMBASE;
14110 return OP_X86_SUB_REG_MEMBASE;
14112 return OP_X86_AND_REG_MEMBASE;
14114 return OP_X86_OR_REG_MEMBASE;
14116 return OP_X86_XOR_REG_MEMBASE;
14118 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14122 return OP_AMD64_COMPARE_REG_MEMBASE;
14124 return OP_AMD64_ADD_REG_MEMBASE;
14126 return OP_AMD64_SUB_REG_MEMBASE;
14128 return OP_AMD64_AND_REG_MEMBASE;
14130 return OP_AMD64_OR_REG_MEMBASE;
14132 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (to which it delegates below), but first filters
 * out opcodes that this architecture emulates in software -- long shifts on
 * 32-bit targets, and mul/div/rem under MONO_ARCH_EMULATE_* -- so the
 * conversion is only applied where a native immediate form exists.
 * NOTE(review): the case labels and the early-out (presumably returning -1)
 * inside the #if blocks are elided in this view.
 */
14141 mono_op_to_op_imm_noemul (int opcode)
14144 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14150 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14157 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14162 return mono_op_to_op_imm (opcode);
14167 * mono_handle_global_vregs:
14169 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Promote vregs used in more than one basic block to 'global' variables
 * (MonoInst vars), demote single-bblock variables back to local vregs, and
 * finally compact cfg->varinfo/cfg->vars so dead entries are removed.
 * Works off a vreg -> defining-bblock map built in the first pass
 * (0 = unseen, block_num + 1 = single block, -1 = multiple blocks).
 */
14173 mono_handle_global_vregs (MonoCompile *cfg)
14175 gint32 *vreg_to_bb;
14176 MonoBasicBlock *bb;
/*
 * FIX(review): was "sizeof (gint32*) * cfg->next_vreg + 1" -- wrong element
 * size (pointer instead of gint32) and the "+ 1" bound outside the
 * multiplication, adding one byte rather than one element.
 */
14179 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
14181 #ifdef MONO_ARCH_SIMD_INTRINSICS
14182 if (cfg->uses_simd_intrinsics)
14183 mono_simd_simplify_indirection (cfg);
14186 /* Find local vregs used in more than one bb */
14187 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14188 MonoInst *ins = bb->code;
14189 int block_num = bb->block_num;
14191 if (cfg->verbose_level > 2)
14192 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14195 for (; ins; ins = ins->next) {
14196 const char *spec = INS_INFO (ins->opcode);
14197 int regtype = 0, regindex;
14200 if (G_UNLIKELY (cfg->verbose_level > 2))
14201 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
14203 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn. */
14205 for (regindex = 0; regindex < 4; regindex ++) {
14208 if (regindex == 0) {
14209 regtype = spec [MONO_INST_DEST];
14210 if (regtype == ' ')
14213 } else if (regindex == 1) {
14214 regtype = spec [MONO_INST_SRC1];
14215 if (regtype == ' ')
14218 } else if (regindex == 2) {
14219 regtype = spec [MONO_INST_SRC2];
14220 if (regtype == ' ')
14223 } else if (regindex == 3) {
14224 regtype = spec [MONO_INST_SRC3];
14225 if (regtype == ' ')
14230 #if SIZEOF_REGISTER == 4
14231 /* In the LLVM case, the long opcodes are not decomposed */
14232 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14234 * Since some instructions reference the original long vreg,
14235 * and some reference the two component vregs, it is quite hard
14236 * to determine when it needs to be global. So be conservative.
14238 if (!get_vreg_to_inst (cfg, vreg)) {
14239 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14241 if (cfg->verbose_level > 2)
14242 printf ("LONG VREG R%d made global.\n", vreg);
14246 * Make the component vregs volatile since the optimizations can
14247 * get confused otherwise.
14249 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14250 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14254 g_assert (vreg != -1);
14256 prev_bb = vreg_to_bb [vreg];
14257 if (prev_bb == 0) {
14258 /* 0 is a valid block num */
14259 vreg_to_bb [vreg] = block_num + 1;
14260 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
14261 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14264 if (!get_vreg_to_inst (cfg, vreg)) {
14265 if (G_UNLIKELY (cfg->verbose_level > 2))
14266 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the register class ('i'/'l'/'f'/'v'). */
14270 if (vreg_is_ref (cfg, vreg))
14271 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14273 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14276 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14279 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14282 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14285 g_assert_not_reached ();
14289 /* Flag as having been used in more than one bb */
14290 vreg_to_bb [vreg] = -1;
14296 /* If a variable is used in only one bblock, convert it into a local vreg */
14297 for (i = 0; i < cfg->num_varinfo; i++) {
14298 MonoInst *var = cfg->varinfo [i];
14299 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14301 switch (var->type) {
14307 #if SIZEOF_REGISTER == 8
14310 #if !defined(TARGET_X86)
14311 /* Enabling this screws up the fp stack on x86 */
14314 if (mono_arch_is_soft_float ())
14318 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14322 /* Arguments are implicitly global */
14323 /* Putting R4 vars into registers doesn't work currently */
14324 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14325 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14327 * Make that the variable's liveness interval doesn't contain a call, since
14328 * that would cause the lvreg to be spilled, making the whole optimization
14331 /* This is too slow for JIT compilation */
14333 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14335 int def_index, call_index, ins_index;
14336 gboolean spilled = FALSE;
14341 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14342 const char *spec = INS_INFO (ins->opcode);
14344 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14345 def_index = ins_index;
/*
 * FIX(review): the second disjunct duplicated the SRC1/sreg1 test
 * (copy-paste bug), so uses through the second source register were
 * never seen here; it must test SRC2/sreg2.
 */
14347 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14348 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
14349 if (call_index > def_index) {
14355 if (MONO_IS_CALL (ins))
14356 call_index = ins_index;
14366 if (G_UNLIKELY (cfg->verbose_level > 2))
14367 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14368 var->flags |= MONO_INST_IS_DEAD;
14369 cfg->vreg_to_inst [var->dreg] = NULL;
14376 * Compress the varinfo and vars tables so the liveness computation is faster and
14377 * takes up less space.
14380 for (i = 0; i < cfg->num_varinfo; ++i) {
14381 MonoInst *var = cfg->varinfo [i];
14382 if (pos < i && cfg->locals_start == i)
14383 cfg->locals_start = pos;
14384 if (!(var->flags & MONO_INST_IS_DEAD)) {
14386 cfg->varinfo [pos] = cfg->varinfo [i];
14387 cfg->varinfo [pos]->inst_c0 = pos;
14388 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14389 cfg->vars [pos].idx = pos;
14390 #if SIZEOF_REGISTER == 4
14391 if (cfg->varinfo [pos]->type == STACK_I8) {
14392 /* Modify the two component vars too */
14395 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14396 var1->inst_c0 = pos;
14397 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14398 var1->inst_c0 = pos;
14405 cfg->num_varinfo = pos;
14406 if (cfg->locals_start > cfg->num_varinfo)
14407 cfg->locals_start = cfg->num_varinfo;
14411 * mono_allocate_gsharedvt_vars:
14413 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14414 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * mono_allocate_gsharedvt_vars:
 *
 *   Walk cfg->varinfo and, for every variable with a gsharedvt (variable
 * size) type, record its slot in cfg->gsharedvt_vreg_to_idx: locals
 * (i >= cfg->locals_start) get a runtime-info slot index (stored as idx + 1
 * so 0 can mean "not gsharedvt") and become OP_GSHAREDVT_LOCAL; the other
 * entries (presumably arguments) get -1 and become
 * OP_GSHAREDVT_ARG_REGOFFSET. NOTE(review): this view is an excerpt --
 * declarations and closing braces are elided.
 */
14417 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14421 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14423 for (i = 0; i < cfg->num_varinfo; ++i) {
14424 MonoInst *ins = cfg->varinfo [i];
14427 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14428 if (i >= cfg->locals_start) {
14430 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14431 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14432 ins->opcode = OP_GSHAREDVT_LOCAL;
14433 ins->inst_imm = idx;
14436 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14437 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14444 * mono_spill_global_vars:
14446 * Generate spill code for variables which are not allocated to registers,
14447 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14448 * code is generated which could be optimized by the local optimization passes.
14451 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14453 MonoBasicBlock *bb;
14455 int orig_next_vreg;
14456 guint32 *vreg_to_lvreg;
14458 guint32 i, lvregs_len;
14459 gboolean dest_has_lvreg = FALSE;
14460 MonoStackType stacktypes [128];
14461 MonoInst **live_range_start, **live_range_end;
14462 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14464 *need_local_opts = FALSE;
14466 memset (spec2, 0, sizeof (spec2));
14468 /* FIXME: Move this function to mini.c */
14469 stacktypes ['i'] = STACK_PTR;
14470 stacktypes ['l'] = STACK_I8;
14471 stacktypes ['f'] = STACK_R8;
14472 #ifdef MONO_ARCH_SIMD_INTRINSICS
14473 stacktypes ['x'] = STACK_VTYPE;
14476 #if SIZEOF_REGISTER == 4
14477 /* Create MonoInsts for longs */
14478 for (i = 0; i < cfg->num_varinfo; i++) {
14479 MonoInst *ins = cfg->varinfo [i];
14481 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14482 switch (ins->type) {
14487 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14490 g_assert (ins->opcode == OP_REGOFFSET);
14492 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14494 tree->opcode = OP_REGOFFSET;
14495 tree->inst_basereg = ins->inst_basereg;
14496 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14498 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14500 tree->opcode = OP_REGOFFSET;
14501 tree->inst_basereg = ins->inst_basereg;
14502 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14512 if (cfg->compute_gc_maps) {
14513 /* registers need liveness info even for !non refs */
14514 for (i = 0; i < cfg->num_varinfo; i++) {
14515 MonoInst *ins = cfg->varinfo [i];
14517 if (ins->opcode == OP_REGVAR)
14518 ins->flags |= MONO_INST_GC_TRACK;
14522 /* FIXME: widening and truncation */
14525 * As an optimization, when a variable allocated to the stack is first loaded into
14526 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14527 * the variable again.
14529 orig_next_vreg = cfg->next_vreg;
14530 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14531 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14535 * These arrays contain the first and last instructions accessing a given
14537 * Since we emit bblocks in the same order we process them here, and we
14538 * don't split live ranges, these will precisely describe the live range of
14539 * the variable, i.e. the instruction range where a valid value can be found
14540 * in the variables location.
14541 * The live range is computed using the liveness info computed by the liveness pass.
14542 * We can't use vmv->range, since that is an abstract live range, and we need
14543 * one which is instruction precise.
14544 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14546 /* FIXME: Only do this if debugging info is requested */
14547 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14548 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14549 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14550 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14552 /* Add spill loads/stores */
14553 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14556 if (cfg->verbose_level > 2)
14557 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14559 /* Clear vreg_to_lvreg array */
14560 for (i = 0; i < lvregs_len; i++)
14561 vreg_to_lvreg [lvregs [i]] = 0;
14565 MONO_BB_FOR_EACH_INS (bb, ins) {
14566 const char *spec = INS_INFO (ins->opcode);
14567 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14568 gboolean store, no_lvreg;
14569 int sregs [MONO_MAX_SRC_REGS];
14571 if (G_UNLIKELY (cfg->verbose_level > 2))
14572 mono_print_ins (ins);
14574 if (ins->opcode == OP_NOP)
14578 * We handle LDADDR here as well, since it can only be decomposed
14579 * when variable addresses are known.
14581 if (ins->opcode == OP_LDADDR) {
14582 MonoInst *var = (MonoInst *)ins->inst_p0;
14584 if (var->opcode == OP_VTARG_ADDR) {
14585 /* Happens on SPARC/S390 where vtypes are passed by reference */
14586 MonoInst *vtaddr = var->inst_left;
14587 if (vtaddr->opcode == OP_REGVAR) {
14588 ins->opcode = OP_MOVE;
14589 ins->sreg1 = vtaddr->dreg;
14591 else if (var->inst_left->opcode == OP_REGOFFSET) {
14592 ins->opcode = OP_LOAD_MEMBASE;
14593 ins->inst_basereg = vtaddr->inst_basereg;
14594 ins->inst_offset = vtaddr->inst_offset;
14597 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14598 /* gsharedvt arg passed by ref */
14599 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14601 ins->opcode = OP_LOAD_MEMBASE;
14602 ins->inst_basereg = var->inst_basereg;
14603 ins->inst_offset = var->inst_offset;
14604 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14605 MonoInst *load, *load2, *load3;
14606 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14607 int reg1, reg2, reg3;
14608 MonoInst *info_var = cfg->gsharedvt_info_var;
14609 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14613 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14616 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14618 g_assert (info_var);
14619 g_assert (locals_var);
14621 /* Mark the instruction used to compute the locals var as used */
14622 cfg->gsharedvt_locals_var_ins = NULL;
14624 /* Load the offset */
14625 if (info_var->opcode == OP_REGOFFSET) {
14626 reg1 = alloc_ireg (cfg);
14627 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14628 } else if (info_var->opcode == OP_REGVAR) {
14630 reg1 = info_var->dreg;
14632 g_assert_not_reached ();
14634 reg2 = alloc_ireg (cfg);
14635 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14636 /* Load the locals area address */
14637 reg3 = alloc_ireg (cfg);
14638 if (locals_var->opcode == OP_REGOFFSET) {
14639 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14640 } else if (locals_var->opcode == OP_REGVAR) {
14641 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14643 g_assert_not_reached ();
14645 /* Compute the address */
14646 ins->opcode = OP_PADD;
14650 mono_bblock_insert_before_ins (bb, ins, load3);
14651 mono_bblock_insert_before_ins (bb, load3, load2);
14653 mono_bblock_insert_before_ins (bb, load2, load);
14655 g_assert (var->opcode == OP_REGOFFSET);
14657 ins->opcode = OP_ADD_IMM;
14658 ins->sreg1 = var->inst_basereg;
14659 ins->inst_imm = var->inst_offset;
14662 *need_local_opts = TRUE;
14663 spec = INS_INFO (ins->opcode);
14666 if (ins->opcode < MONO_CEE_LAST) {
14667 mono_print_ins (ins);
14668 g_assert_not_reached ();
14672 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14676 if (MONO_IS_STORE_MEMBASE (ins)) {
14677 tmp_reg = ins->dreg;
14678 ins->dreg = ins->sreg2;
14679 ins->sreg2 = tmp_reg;
14682 spec2 [MONO_INST_DEST] = ' ';
14683 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14684 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14685 spec2 [MONO_INST_SRC3] = ' ';
14687 } else if (MONO_IS_STORE_MEMINDEX (ins))
14688 g_assert_not_reached ();
14693 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14694 printf ("\t %.3s %d", spec, ins->dreg);
14695 num_sregs = mono_inst_get_src_registers (ins, sregs);
14696 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14697 printf (" %d", sregs [srcindex]);
14704 regtype = spec [MONO_INST_DEST];
14705 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14708 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14709 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14710 MonoInst *store_ins;
14712 MonoInst *def_ins = ins;
14713 int dreg = ins->dreg; /* The original vreg */
14715 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14717 if (var->opcode == OP_REGVAR) {
14718 ins->dreg = var->dreg;
14719 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14721 * Instead of emitting a load+store, use a _membase opcode.
14723 g_assert (var->opcode == OP_REGOFFSET);
14724 if (ins->opcode == OP_MOVE) {
14728 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14729 ins->inst_basereg = var->inst_basereg;
14730 ins->inst_offset = var->inst_offset;
14733 spec = INS_INFO (ins->opcode);
14737 g_assert (var->opcode == OP_REGOFFSET);
14739 prev_dreg = ins->dreg;
14741 /* Invalidate any previous lvreg for this vreg */
14742 vreg_to_lvreg [ins->dreg] = 0;
14746 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14748 store_opcode = OP_STOREI8_MEMBASE_REG;
14751 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14753 #if SIZEOF_REGISTER != 8
14754 if (regtype == 'l') {
14755 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14756 mono_bblock_insert_after_ins (bb, ins, store_ins);
14757 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14758 mono_bblock_insert_after_ins (bb, ins, store_ins);
14759 def_ins = store_ins;
14764 g_assert (store_opcode != OP_STOREV_MEMBASE);
14766 /* Try to fuse the store into the instruction itself */
14767 /* FIXME: Add more instructions */
14768 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14769 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14770 ins->inst_imm = ins->inst_c0;
14771 ins->inst_destbasereg = var->inst_basereg;
14772 ins->inst_offset = var->inst_offset;
14773 spec = INS_INFO (ins->opcode);
14774 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14775 ins->opcode = store_opcode;
14776 ins->inst_destbasereg = var->inst_basereg;
14777 ins->inst_offset = var->inst_offset;
14781 tmp_reg = ins->dreg;
14782 ins->dreg = ins->sreg2;
14783 ins->sreg2 = tmp_reg;
14786 spec2 [MONO_INST_DEST] = ' ';
14787 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14788 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14789 spec2 [MONO_INST_SRC3] = ' ';
14791 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14792 // FIXME: The backends expect the base reg to be in inst_basereg
14793 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14795 ins->inst_basereg = var->inst_basereg;
14796 ins->inst_offset = var->inst_offset;
14797 spec = INS_INFO (ins->opcode);
14799 /* printf ("INS: "); mono_print_ins (ins); */
14800 /* Create a store instruction */
14801 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14803 /* Insert it after the instruction */
14804 mono_bblock_insert_after_ins (bb, ins, store_ins);
14806 def_ins = store_ins;
14809 * We can't assign ins->dreg to var->dreg here, since the
14810 * sregs could use it. So set a flag, and do it after
14813 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14814 dest_has_lvreg = TRUE;
14819 if (def_ins && !live_range_start [dreg]) {
14820 live_range_start [dreg] = def_ins;
14821 live_range_start_bb [dreg] = bb;
14824 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14827 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14828 tmp->inst_c1 = dreg;
14829 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14836 num_sregs = mono_inst_get_src_registers (ins, sregs);
14837 for (srcindex = 0; srcindex < 3; ++srcindex) {
14838 regtype = spec [MONO_INST_SRC1 + srcindex];
14839 sreg = sregs [srcindex];
14841 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14842 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14843 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14844 MonoInst *use_ins = ins;
14845 MonoInst *load_ins;
14846 guint32 load_opcode;
14848 if (var->opcode == OP_REGVAR) {
14849 sregs [srcindex] = var->dreg;
14850 //mono_inst_set_src_registers (ins, sregs);
14851 live_range_end [sreg] = use_ins;
14852 live_range_end_bb [sreg] = bb;
14854 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14857 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14858 /* var->dreg is a hreg */
14859 tmp->inst_c1 = sreg;
14860 mono_bblock_insert_after_ins (bb, ins, tmp);
14866 g_assert (var->opcode == OP_REGOFFSET);
14868 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14870 g_assert (load_opcode != OP_LOADV_MEMBASE);
14872 if (vreg_to_lvreg [sreg]) {
14873 g_assert (vreg_to_lvreg [sreg] != -1);
14875 /* The variable is already loaded to an lvreg */
14876 if (G_UNLIKELY (cfg->verbose_level > 2))
14877 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14878 sregs [srcindex] = vreg_to_lvreg [sreg];
14879 //mono_inst_set_src_registers (ins, sregs);
14883 /* Try to fuse the load into the instruction */
14884 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14885 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14886 sregs [0] = var->inst_basereg;
14887 //mono_inst_set_src_registers (ins, sregs);
14888 ins->inst_offset = var->inst_offset;
14889 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14890 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14891 sregs [1] = var->inst_basereg;
14892 //mono_inst_set_src_registers (ins, sregs);
14893 ins->inst_offset = var->inst_offset;
14895 if (MONO_IS_REAL_MOVE (ins)) {
14896 ins->opcode = OP_NOP;
14899 //printf ("%d ", srcindex); mono_print_ins (ins);
14901 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14903 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14904 if (var->dreg == prev_dreg) {
14906 * sreg refers to the value loaded by the load
14907 * emitted below, but we need to use ins->dreg
14908 * since it refers to the store emitted earlier.
14912 g_assert (sreg != -1);
14913 vreg_to_lvreg [var->dreg] = sreg;
14914 g_assert (lvregs_len < 1024);
14915 lvregs [lvregs_len ++] = var->dreg;
14919 sregs [srcindex] = sreg;
14920 //mono_inst_set_src_registers (ins, sregs);
14922 #if SIZEOF_REGISTER != 8
14923 if (regtype == 'l') {
14924 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14925 mono_bblock_insert_before_ins (bb, ins, load_ins);
14926 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14927 mono_bblock_insert_before_ins (bb, ins, load_ins);
14928 use_ins = load_ins;
14933 #if SIZEOF_REGISTER == 4
14934 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14936 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14937 mono_bblock_insert_before_ins (bb, ins, load_ins);
14938 use_ins = load_ins;
14942 if (var->dreg < orig_next_vreg) {
14943 live_range_end [var->dreg] = use_ins;
14944 live_range_end_bb [var->dreg] = bb;
14947 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14950 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14951 tmp->inst_c1 = var->dreg;
14952 mono_bblock_insert_after_ins (bb, ins, tmp);
14956 mono_inst_set_src_registers (ins, sregs);
14958 if (dest_has_lvreg) {
14959 g_assert (ins->dreg != -1);
14960 vreg_to_lvreg [prev_dreg] = ins->dreg;
14961 g_assert (lvregs_len < 1024);
14962 lvregs [lvregs_len ++] = prev_dreg;
14963 dest_has_lvreg = FALSE;
14967 tmp_reg = ins->dreg;
14968 ins->dreg = ins->sreg2;
14969 ins->sreg2 = tmp_reg;
14972 if (MONO_IS_CALL (ins)) {
14973 /* Clear vreg_to_lvreg array */
14974 for (i = 0; i < lvregs_len; i++)
14975 vreg_to_lvreg [lvregs [i]] = 0;
14977 } else if (ins->opcode == OP_NOP) {
14979 MONO_INST_NULLIFY_SREGS (ins);
14982 if (cfg->verbose_level > 2)
14983 mono_print_ins_index (1, ins);
14986 /* Extend the live range based on the liveness info */
14987 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14988 for (i = 0; i < cfg->num_varinfo; i ++) {
14989 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14991 if (vreg_is_volatile (cfg, vi->vreg))
14992 /* The liveness info is incomplete */
14995 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14996 /* Live from at least the first ins of this bb */
14997 live_range_start [vi->vreg] = bb->code;
14998 live_range_start_bb [vi->vreg] = bb;
15001 if (mono_bitset_test_fast (bb->live_out_set, i)) {
15002 /* Live at least until the last ins of this bb */
15003 live_range_end [vi->vreg] = bb->last_ins;
15004 live_range_end_bb [vi->vreg] = bb;
15011 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
15012 * by storing the current native offset into MonoMethodVar->live_range_start/end.
15014 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
15015 for (i = 0; i < cfg->num_varinfo; ++i) {
15016 int vreg = MONO_VARINFO (cfg, i)->vreg;
15019 if (live_range_start [vreg]) {
15020 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
15022 ins->inst_c1 = vreg;
15023 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
15025 if (live_range_end [vreg]) {
15026 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
15028 ins->inst_c1 = vreg;
15029 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
15030 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
15032 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15037 if (cfg->gsharedvt_locals_var_ins) {
15038 /* Nullify if unused */
15039 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15040 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15043 g_free (live_range_start);
15044 g_free (live_range_end);
15045 g_free (live_range_start_bb);
15046 g_free (live_range_end_bb);
15051 * - use 'iadd' instead of 'int_add'
15052 * - handling ovf opcodes: decompose in method_to_ir.
15053 * - unify iregs/fregs
15054 * -> partly done, the missing parts are:
15055 * - a more complete unification would involve unifying the hregs as well, so
15056 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15057 * would no longer map to the machine hregs, so the code generators would need to
15058 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15059 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15060 * fp/non-fp branches speeds it up by about 15%.
15061 * - use sext/zext opcodes instead of shifts
15063 * - get rid of TEMPLOADs if possible and use vregs instead
15064 * - clean up usage of OP_P/OP_ opcodes
15065 * - cleanup usage of DUMMY_USE
15066 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15068 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15069 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15070 * - make sure handle_stack_args () is called before the branch is emitted
15071 * - when the new IR is done, get rid of all unused stuff
15072 * - COMPARE/BEQ as separate instructions or unify them ?
15073 * - keeping them separate allows specialized compare instructions like
15074 * compare_imm, compare_membase
15075 * - most back ends unify fp compare+branch, fp compare+ceq
15076 * - integrate mono_save_args into inline_method
 15077 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15078 * - handle long shift opts on 32 bit platforms somehow: they require
15079 * 3 sregs (2 for arg1 and 1 for arg2)
15080 * - make byref a 'normal' type.
15081 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15082 * variable if needed.
15083 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15084 * like inline_method.
15085 * - remove inlining restrictions
15086 * - fix LNEG and enable cfold of INEG
15087 * - generalize x86 optimizations like ldelema as a peephole optimization
15088 * - add store_mem_imm for amd64
15089 * - optimize the loading of the interruption flag in the managed->native wrappers
15090 * - avoid special handling of OP_NOP in passes
15091 * - move code inserting instructions into one function/macro.
15092 * - try a coalescing phase after liveness analysis
15093 * - add float -> vreg conversion + local optimizations on !x86
15094 * - figure out how to handle decomposed branches during optimizations, ie.
15095 * compare+branch, op_jump_table+op_br etc.
15096 * - promote RuntimeXHandles to vregs
15097 * - vtype cleanups:
15098 * - add a NEW_VARLOADA_VREG macro
15099 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15100 * accessing vtype fields.
15101 * - get rid of I8CONST on 64 bit platforms
15102 * - dealing with the increase in code size due to branches created during opcode
15104 * - use extended basic blocks
15105 * - all parts of the JIT
15106 * - handle_global_vregs () && local regalloc
15107 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15108 * - sources of increase in code size:
15111 * - isinst and castclass
15112 * - lvregs not allocated to global registers even if used multiple times
15113 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15115 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15116 * - add all micro optimizations from the old JIT
15117 * - put tree optimizations into the deadce pass
15118 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15119 * specific function.
15120 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15121 * fcompare + branchCC.
15122 * - create a helper function for allocating a stack slot, taking into account
15123 * MONO_CFG_HAS_SPILLUP.
15125 * - merge the ia64 switch changes.
15126 * - optimize mono_regstate2_alloc_int/float.
15127 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15128 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15129 * parts of the tree could be separated by other instructions, killing the tree
15130 * arguments, or stores killing loads etc. Also, should we fold loads into other
15131 * instructions if the result of the load is used multiple times ?
15132 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15133 * - LAST MERGE: 108395.
15134 * - when returning vtypes in registers, generate IR and append it to the end of the
15135 * last bb instead of doing it in the epilog.
15136 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15144 - When to decompose opcodes:
15145 - earlier: this makes some optimizations hard to implement, since the low level IR
 15146 no longer contains the necessary information. But it is easier to do.
15147 - later: harder to implement, enables more optimizations.
15148 - Branches inside bblocks:
15149 - created when decomposing complex opcodes.
15150 - branches to another bblock: harmless, but not tracked by the branch
15151 optimizations, so need to branch to a label at the start of the bblock.
15152 - branches to inside the same bblock: very problematic, trips up the local
 15153 reg allocator. Can be fixed by splitting the current bblock, but that is a
15154 complex operation, since some local vregs can become global vregs etc.
15155 - Local/global vregs:
15156 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15157 local register allocator.
15158 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15159 structure, created by mono_create_var (). Assigned to hregs or the stack by
15160 the global register allocator.
15161 - When to do optimizations like alu->alu_imm:
15162 - earlier -> saves work later on since the IR will be smaller/simpler
15163 - later -> can work on more instructions
15164 - Handling of valuetypes:
15165 - When a vtype is pushed on the stack, a new temporary is created, an
15166 instruction computing its address (LDADDR) is emitted and pushed on
15167 the stack. Need to optimize cases when the vtype is used immediately as in
15168 argument passing, stloc etc.
15169 - Instead of the to_end stuff in the old JIT, simply call the function handling
15170 the values on the stack before emitting the last instruction of the bb.
15173 #endif /* DISABLE_JIT */