2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
28 #ifdef HAVE_SYS_TIME_H
36 #include <mono/utils/memcheck.h>
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
67 #include <mono/utils/mono-threads-coop.h>
73 #include "jit-icalls.h"
75 #include "debugger-agent.h"
76 #include "seq-points.h"
77 #include "aot-compiler.h"
78 #include "mini-llvm.h"
80 #define BRANCH_COST 10
81 #define INLINE_LENGTH_LIMIT 20
83 /* These have 'cfg' as an implicit argument */
84 #define INLINE_FAILURE(msg) do { \
85 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
86 inline_failure (cfg, msg); \
87 goto exception_exit; \
90 #define CHECK_CFG_EXCEPTION do {\
91 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE(method, field) do { \
95 field_access_failure ((cfg), (method), (field)); \
96 goto exception_exit; \
98 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
101 goto exception_exit; \
104 #define GSHAREDVT_FAILURE(opcode) do { \
105 if (cfg->gsharedvt) { \
106 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
107 goto exception_exit; \
110 #define OUT_OF_MEMORY_FAILURE do { \
111 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
112 mono_error_set_out_of_memory (&cfg->error, ""); \
113 goto exception_exit; \
115 #define DISABLE_AOT(cfg) do { \
116 if ((cfg)->verbose_level >= 2) \
117 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
118 (cfg)->disable_aot = TRUE; \
120 #define LOAD_ERROR do { \
121 break_on_unverified (); \
122 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
123 goto exception_exit; \
126 #define TYPE_LOAD_ERROR(klass) do { \
127 cfg->exception_ptr = klass; \
131 #define CHECK_CFG_ERROR do {\
132 if (!mono_error_ok (&cfg->error)) { \
133 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
134 goto mono_error_exit; \
138 /* Determine whether 'ins' represents a load of the 'this' argument */
139 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
147 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
149 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
150 guchar *ip, guint real_offset, gboolean inline_always);
152 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
160 /* type loading helpers */
161 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
162 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
165 * Instruction metadata
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
212 mono_alloc_ireg (MonoCompile *cfg)
214 return alloc_ireg (cfg);
218 mono_alloc_lreg (MonoCompile *cfg)
220 return alloc_lreg (cfg);
224 mono_alloc_freg (MonoCompile *cfg)
226 return alloc_freg (cfg);
230 mono_alloc_preg (MonoCompile *cfg)
232 return alloc_preg (cfg);
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
330 type = &type->data.generic_class->container_class->byval_arg;
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
384 g_free (method_fname);
385 g_free (field_fname);
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
415 * foo<T> (int i) { ldarg.0; box T; }
417 #define UNVERIFIED do { \
418 if (cfg->gsharedvt) { \
419 if (cfg->verbose_level > 2) \
420 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
422 goto exception_exit; \
424 break_on_unverified (); \
428 #define GET_BBLOCK(cfg,tblock,ip) do { \
429 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
431 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
432 NEW_BBLOCK (cfg, (tblock)); \
433 (tblock)->cil_code = (ip); \
434 ADD_BBLOCK (cfg, (tblock)); \
438 #if defined(TARGET_X86) || defined(TARGET_AMD64)
439 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
440 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
441 (dest)->dreg = alloc_ireg_mp ((cfg)); \
442 (dest)->sreg1 = (sr1); \
443 (dest)->sreg2 = (sr2); \
444 (dest)->inst_imm = (imm); \
445 (dest)->backend.shift_amount = (shift); \
446 MONO_ADD_INS ((cfg)->cbb, (dest)); \
450 /* Emit conversions so both operands of a binary opcode are of the same type */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
493 #define ADD_BINOP(op) do { \
494 MONO_INST_NEW (cfg, ins, (op)); \
496 ins->sreg1 = sp [0]->dreg; \
497 ins->sreg2 = sp [1]->dreg; \
498 type_from_op (cfg, ins, sp [0], sp [1]); \
500 /* Have to insert a widening op */ \
501 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
502 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
503 MONO_ADD_INS ((cfg)->cbb, (ins)); \
504 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
507 #define ADD_UNOP(op) do { \
508 MONO_INST_NEW (cfg, ins, (op)); \
510 ins->sreg1 = sp [0]->dreg; \
511 type_from_op (cfg, ins, sp [0], NULL); \
513 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode (cfg, ins); \
518 #define ADD_BINCOND(next_block) do { \
521 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
522 cmp->sreg1 = sp [0]->dreg; \
523 cmp->sreg2 = sp [1]->dreg; \
524 type_from_op (cfg, cmp, sp [0], sp [1]); \
526 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
527 type_from_op (cfg, ins, sp [0], sp [1]); \
528 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
529 GET_BBLOCK (cfg, tblock, target); \
530 link_bblock (cfg, cfg->cbb, tblock); \
531 ins->inst_true_bb = tblock; \
532 if ((next_block)) { \
533 link_bblock (cfg, cfg->cbb, (next_block)); \
534 ins->inst_false_bb = (next_block); \
535 start_new_bblock = 1; \
537 GET_BBLOCK (cfg, tblock, ip); \
538 link_bblock (cfg, cfg->cbb, tblock); \
539 ins->inst_false_bb = tblock; \
540 start_new_bblock = 2; \
542 if (sp != stack_start) { \
543 handle_stack_args (cfg, stack_start, sp - stack_start); \
544 CHECK_UNVERIFIABLE (cfg); \
546 MONO_ADD_INS (cfg->cbb, cmp); \
547 MONO_ADD_INS (cfg->cbb, ins); \
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * control flow reaches after leaving 'from'.
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
585 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
665 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
672 for (i = 0; i < header->num_clauses; ++i) {
673 clause = &header->clauses [i];
674 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
675 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
676 if (clause->flags == type)
677 res = g_list_append (res, clause);
684 mono_create_spvar_for_region (MonoCompile *cfg, int region)
688 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
692 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
693 /* prevent it from being register allocated */
694 var->flags |= MONO_INST_VOLATILE;
696 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
700 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
702 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
706 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
710 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
714 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
715 /* prevent it from being register allocated */
716 var->flags |= MONO_INST_VOLATILE;
718 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
724 * Returns the type used in the eval stack when @type is loaded.
725 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
728 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
732 type = mini_get_underlying_type (type);
733 inst->klass = klass = mono_class_from_mono_type (type);
735 inst->type = STACK_MP;
740 switch (type->type) {
742 inst->type = STACK_INV;
750 inst->type = STACK_I4;
755 case MONO_TYPE_FNPTR:
756 inst->type = STACK_PTR;
758 case MONO_TYPE_CLASS:
759 case MONO_TYPE_STRING:
760 case MONO_TYPE_OBJECT:
761 case MONO_TYPE_SZARRAY:
762 case MONO_TYPE_ARRAY:
763 inst->type = STACK_OBJ;
767 inst->type = STACK_I8;
770 inst->type = cfg->r4_stack_type;
773 inst->type = STACK_R8;
775 case MONO_TYPE_VALUETYPE:
776 if (type->data.klass->enumtype) {
777 type = mono_class_enum_basetype (type->data.klass);
781 inst->type = STACK_VTYPE;
784 case MONO_TYPE_TYPEDBYREF:
785 inst->klass = mono_defaults.typed_reference_class;
786 inst->type = STACK_VTYPE;
788 case MONO_TYPE_GENERICINST:
789 type = &type->data.generic_class->container_class->byval_arg;
793 g_assert (cfg->gshared);
794 if (mini_is_gsharedvt_type (type)) {
795 g_assert (cfg->gsharedvt);
796 inst->type = STACK_VTYPE;
798 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
802 g_error ("unknown type 0x%02x in eval stack type", type->type);
807 * The following tables are used to quickly validate the IL code in type_from_op ().
810 bin_num_table [STACK_MAX] [STACK_MAX] = {
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
816 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
824 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
827 /* reduce the size of this table */
829 bin_int_table [STACK_MAX] [STACK_MAX] = {
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
841 bin_comp_table [STACK_MAX] [STACK_MAX] = {
842 /* Inv i L p F & O vt r4 */
844 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
845 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
846 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
847 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
848 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
849 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
850 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
851 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
854 /* reduce the size of this table */
856 shift_table [STACK_MAX] [STACK_MAX] = {
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
868 * Tables to map from the non-specific opcode to the matching
869 * type-specific opcode.
871 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
873 binops_op_map [STACK_MAX] = {
874 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
877 /* handles from CEE_NEG to CEE_CONV_U8 */
879 unops_op_map [STACK_MAX] = {
880 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
883 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
885 ovfops_op_map [STACK_MAX] = {
886 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
889 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
891 ovf2ops_op_map [STACK_MAX] = {
892 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
895 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
897 ovf3ops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
901 /* handles from CEE_BEQ to CEE_BLT_UN */
903 beqops_op_map [STACK_MAX] = {
904 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
907 /* handles from CEE_CEQ to CEE_CLT_UN */
909 ceqops_op_map [STACK_MAX] = {
910 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
914 * Sets ins->type (the type on the eval stack) according to the
915 * type of the opcode and the arguments to it.
916 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
918 * FIXME: this function sets ins->type unconditionally in some cases, but
919 * it should set it to invalid for some types (a conv.x on an object)
922 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
924 switch (ins->opcode) {
931 /* FIXME: check unverifiable args for STACK_MP */
932 ins->type = bin_num_table [src1->type] [src2->type];
933 ins->opcode += binops_op_map [ins->type];
940 ins->type = bin_int_table [src1->type] [src2->type];
941 ins->opcode += binops_op_map [ins->type];
946 ins->type = shift_table [src1->type] [src2->type];
947 ins->opcode += binops_op_map [ins->type];
952 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
953 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
954 ins->opcode = OP_LCOMPARE;
955 else if (src1->type == STACK_R4)
956 ins->opcode = OP_RCOMPARE;
957 else if (src1->type == STACK_R8)
958 ins->opcode = OP_FCOMPARE;
960 ins->opcode = OP_ICOMPARE;
962 case OP_ICOMPARE_IMM:
963 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE_IMM;
977 ins->opcode += beqops_op_map [src1->type];
980 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
981 ins->opcode += ceqops_op_map [src1->type];
987 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
992 ins->type = neg_table [src1->type];
993 ins->opcode += unops_op_map [ins->type];
996 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
997 ins->type = src1->type;
999 ins->type = STACK_INV;
1000 ins->opcode += unops_op_map [ins->type];
1006 ins->type = STACK_I4;
1007 ins->opcode += unops_op_map [src1->type];
1010 ins->type = STACK_R8;
1011 switch (src1->type) {
1014 ins->opcode = OP_ICONV_TO_R_UN;
1017 ins->opcode = OP_LCONV_TO_R_UN;
1021 case CEE_CONV_OVF_I1:
1022 case CEE_CONV_OVF_U1:
1023 case CEE_CONV_OVF_I2:
1024 case CEE_CONV_OVF_U2:
1025 case CEE_CONV_OVF_I4:
1026 case CEE_CONV_OVF_U4:
1027 ins->type = STACK_I4;
1028 ins->opcode += ovf3ops_op_map [src1->type];
1030 case CEE_CONV_OVF_I_UN:
1031 case CEE_CONV_OVF_U_UN:
1032 ins->type = STACK_PTR;
1033 ins->opcode += ovf2ops_op_map [src1->type];
1035 case CEE_CONV_OVF_I1_UN:
1036 case CEE_CONV_OVF_I2_UN:
1037 case CEE_CONV_OVF_I4_UN:
1038 case CEE_CONV_OVF_U1_UN:
1039 case CEE_CONV_OVF_U2_UN:
1040 case CEE_CONV_OVF_U4_UN:
1041 ins->type = STACK_I4;
1042 ins->opcode += ovf2ops_op_map [src1->type];
1045 ins->type = STACK_PTR;
1046 switch (src1->type) {
1048 ins->opcode = OP_ICONV_TO_U;
1052 #if SIZEOF_VOID_P == 8
1053 ins->opcode = OP_LCONV_TO_U;
1055 ins->opcode = OP_MOVE;
1059 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_FCONV_TO_U;
1068 ins->type = STACK_I8;
1069 ins->opcode += unops_op_map [src1->type];
1071 case CEE_CONV_OVF_I8:
1072 case CEE_CONV_OVF_U8:
1073 ins->type = STACK_I8;
1074 ins->opcode += ovf3ops_op_map [src1->type];
1076 case CEE_CONV_OVF_U8_UN:
1077 case CEE_CONV_OVF_I8_UN:
1078 ins->type = STACK_I8;
1079 ins->opcode += ovf2ops_op_map [src1->type];
1082 ins->type = cfg->r4_stack_type;
1083 ins->opcode += unops_op_map [src1->type];
1086 ins->type = STACK_R8;
1087 ins->opcode += unops_op_map [src1->type];
1090 ins->type = STACK_R8;
1094 ins->type = STACK_I4;
1095 ins->opcode += ovfops_op_map [src1->type];
1098 case CEE_CONV_OVF_I:
1099 case CEE_CONV_OVF_U:
1100 ins->type = STACK_PTR;
1101 ins->opcode += ovfops_op_map [src1->type];
1104 case CEE_ADD_OVF_UN:
1106 case CEE_MUL_OVF_UN:
1108 case CEE_SUB_OVF_UN:
1109 ins->type = bin_num_table [src1->type] [src2->type];
1110 ins->opcode += ovfops_op_map [src1->type];
1111 if (ins->type == STACK_R8)
1112 ins->type = STACK_INV;
1114 case OP_LOAD_MEMBASE:
1115 ins->type = STACK_PTR;
1117 case OP_LOADI1_MEMBASE:
1118 case OP_LOADU1_MEMBASE:
1119 case OP_LOADI2_MEMBASE:
1120 case OP_LOADU2_MEMBASE:
1121 case OP_LOADI4_MEMBASE:
1122 case OP_LOADU4_MEMBASE:
1123 ins->type = STACK_PTR;
1125 case OP_LOADI8_MEMBASE:
1126 ins->type = STACK_I8;
1128 case OP_LOADR4_MEMBASE:
1129 ins->type = cfg->r4_stack_type;
1131 case OP_LOADR8_MEMBASE:
1132 ins->type = STACK_R8;
1135 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1139 if (ins->type == STACK_MP)
1140 ins->klass = mono_defaults.object_class;
1145 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1151 param_table [STACK_MAX] [STACK_MAX] = {
1156 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1161 switch (args->type) {
1171 for (i = 0; i < sig->param_count; ++i) {
1172 switch (args [i].type) {
1176 if (!sig->params [i]->byref)
1180 if (sig->params [i]->byref)
1182 switch (sig->params [i]->type) {
1183 case MONO_TYPE_CLASS:
1184 case MONO_TYPE_STRING:
1185 case MONO_TYPE_OBJECT:
1186 case MONO_TYPE_SZARRAY:
1187 case MONO_TYPE_ARRAY:
1194 if (sig->params [i]->byref)
1196 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1205 /*if (!param_table [args [i].type] [sig->params [i]->type])
1213 * When we need a pointer to the current domain many times in a method, we
1214 * call mono_domain_get() once and we store the result in a local variable.
1215 * This function returns the variable that represents the MonoDomain*.
1217 inline static MonoInst *
1218 mono_get_domainvar (MonoCompile *cfg)
1220 if (!cfg->domainvar)
1221 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1222 return cfg->domainvar;
1226 * The got_var contains the address of the Global Offset Table when AOT
1230 mono_get_got_var (MonoCompile *cfg)
1232 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1234 if (!cfg->got_var) {
1235 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1237 return cfg->got_var;
1241 mono_get_vtable_var (MonoCompile *cfg)
1243 g_assert (cfg->gshared);
1245 if (!cfg->rgctx_var) {
1246 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 /* force the var to be stack allocated */
1248 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1251 return cfg->rgctx_var;
1255 type_from_stack_type (MonoInst *ins) {
1256 switch (ins->type) {
1257 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1258 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1259 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1260 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1261 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1263 return &ins->klass->this_arg;
1264 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1265 case STACK_VTYPE: return &ins->klass->byval_arg;
1267 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Inverse of type_from_stack_type: maps a MonoType* to its STACK_* category.
 * NOTE(review): many case labels and return statements are elided in this
 * excerpt (original lines 1276-1311); the visible fragments show enum
 * underlying-type resolution, the reference-type group, the R4 path using
 * cfg->r4_stack_type, and the valuetype/genericinst handling.
 */
1272 static G_GNUC_UNUSED int
1273 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1275 t = mono_type_get_underlying_type (t);
1287 case MONO_TYPE_FNPTR:
1289 case MONO_TYPE_CLASS:
1290 case MONO_TYPE_STRING:
1291 case MONO_TYPE_OBJECT:
1292 case MONO_TYPE_SZARRAY:
1293 case MONO_TYPE_ARRAY:
1299 return cfg->r4_stack_type;
1302 case MONO_TYPE_VALUETYPE:
1303 case MONO_TYPE_TYPEDBYREF:
1305 case MONO_TYPE_GENERICINST:
1306 if (mono_type_generic_inst_is_valuetype (t))
1312 g_assert_not_reached ();
/*
 * Maps a CIL array-access opcode (LDELEM_*/STELEM_*; case labels partially
 * elided in this excerpt) to the MonoClass of the element being accessed.
 * Unknown opcodes abort via g_assert_not_reached.
 */
1319 array_access_to_klass (int opcode)
1323 return mono_defaults.byte_class;
1325 return mono_defaults.uint16_class;
1328 return mono_defaults.int_class;
1331 return mono_defaults.sbyte_class;
1334 return mono_defaults.int16_class;
1337 return mono_defaults.int32_class;
1339 return mono_defaults.uint32_class;
1342 return mono_defaults.int64_class;
1345 return mono_defaults.single_class;
1348 return mono_defaults.double_class;
1349 case CEE_LDELEM_REF:
1350 case CEE_STELEM_REF:
1351 return mono_defaults.object_class;
1353 g_assert_not_reached ();
1359 * We try to share variables when possible
/*
 * Returns (or creates) the "interface" local used to carry the value at
 * evaluation-stack slot 'slot' across basic-block boundaries. Variables are
 * cached in cfg->intvars keyed by (stack type, slot) so the same slot/type
 * pair reuses one local. Slots beyond header->max_stack (from inlining)
 * always get a fresh variable.
 */
1362 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1367 /* inlining can result in deeper stacks */
1368 if (slot >= cfg->header->max_stack)
1369 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1371 pos = ins->type - 1 + slot * STACK_MAX;
1373 switch (ins->type) {
1380 if ((vnum = cfg->intvars [pos]))
1381 return cfg->varinfo [vnum];
1382 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 cfg->intvars [pos] = res->inst_c0;
/* Fallback path (case labels elided): non-cacheable types get a fresh var. */
1386 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Records image+token for 'key' in cfg->token_info_hash so the AOT compiler
 * can later resolve the referenced item without a generic context.
 */
1392 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1395 * Don't use this if a generic_context is set, since that means AOT can't
1396 * look up the method using just the image+token.
1397 * table == 0 means this is a reference made from a wrapper.
1399 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1400 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1401 jump_info_token->image = image;
1402 jump_info_token->token = token;
1403 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1408 * This function is called to handle items that are left on the evaluation stack
1409 * at basic block boundaries. What happens is that we save the values to local variables
1410 * and we reload them later when first entering the target basic block (with the
1411 * handle_loaded_temps () function).
1412 * A single joint point will use the same variables (stored in the array bb->out_stack or
1413 * bb->in_stack, if the basic block is before or after the joint point).
1415 * This function needs to be called _before_ emitting the last instruction of
1416 * the bb (i.e. before emitting a branch).
1417 * If the stack merge fails at a join point, cfg->unverifiable is set.
1420 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1423 MonoBasicBlock *bb = cfg->cbb;
1424 MonoBasicBlock *outb;
1425 MonoInst *inst, **locals;
1430 if (cfg->verbose_level > 3)
1431 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or inherit) the set of spill locals. */
1432 if (!bb->out_scount) {
1433 bb->out_scount = count;
1434 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack as our out_stack. */
1436 for (i = 0; i < bb->out_count; ++i) {
1437 outb = bb->out_bb [i];
1438 /* exception handlers are linked, but they should not be considered for stack args */
1439 if (outb->flags & BB_EXCEPTION_HANDLER)
1441 //printf (" %d", outb->block_num);
1442 if (outb->in_stack) {
1444 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack entries. */
1450 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1451 for (i = 0; i < count; ++i) {
1453 * try to reuse temps already allocated for this purpouse, if they occupy the same
1454 * stack slot and if they are of the same type.
1455 * This won't cause conflicts since if 'local' is used to
1456 * store one of the values in the in_stack of a bblock, then
1457 * the same variable will be used for the same outgoing stack
1459 * This doesn't work when inlining methods, since the bblocks
1460 * in the inlined methods do not inherit their in_stack from
1461 * the bblock they are inlined to. See bug #58863 for an
1464 if (cfg->inlined_method)
1465 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1467 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate in_scount/in_stack to successors; mismatched depths are unverifiable. */
1472 for (i = 0; i < bb->out_count; ++i) {
1473 outb = bb->out_bb [i];
1474 /* exception handlers are linked, but they should not be considered for stack args */
1475 if (outb->flags & BB_EXCEPTION_HANDLER)
1477 if (outb->in_scount) {
1478 if (outb->in_scount != bb->out_scount) {
1479 cfg->unverifiable = TRUE;
1482 continue; /* check they are the same locals */
1484 outb->in_scount = count;
1485 outb->in_stack = bb->out_stack;
1488 locals = bb->out_stack;
/* Spill the current stack values into the chosen locals. */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1499 * It is possible that the out bblocks already have in_stack assigned, and
1500 * the in_stacks differ. In this case, we will store to all the different
1507 /* Find a bblock which has a different in_stack */
1509 while (bindex < bb->out_count) {
1510 outb = bb->out_bb [bindex];
1511 /* exception handlers are linked, but they should not be considered for stack args */
1512 if (outb->flags & BB_EXCEPTION_HANDLER) {
1516 if (outb->in_stack != locals) {
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1524 locals = outb->in_stack;
/*
 * Emits an instruction producing a runtime constant described by
 * (patch_type, data). Under AOT this becomes an AOTCONST patch resolved at
 * load time; under JIT the patch is resolved immediately via
 * mono_resolve_patch_target and emitted as a PCONST.
 */
1534 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1538 if (cfg->compile_aot) {
1539 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1545 ji.type = patch_type;
1546 ji.data.target = data;
1547 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1548 mono_error_assert_ok (&error);
1550 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * Emits IR testing whether the interface bitmap at [base_reg + offset]
 * has the bit for klass's interface id set; the (zero/nonzero) result is
 * left in intf_bit_reg. Three strategies:
 *  - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match icall;
 *  - AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at
 *    runtime, since the iid is only known at load time;
 *  - JIT: fold the byte offset and mask to compile-time constants.
 */
1556 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1558 int ibitmap_reg = alloc_preg (cfg);
1559 #ifdef COMPRESSED_INTERFACE_BITMAP
1561 MonoInst *res, *ins;
1562 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1563 MONO_ADD_INS (cfg->cbb, ins);
1565 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1566 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1567 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1569 int ibitmap_byte_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1573 if (cfg->compile_aot) {
1574 int iid_reg = alloc_preg (cfg);
1575 int shifted_iid_reg = alloc_preg (cfg);
1576 int ibitmap_byte_address_reg = alloc_preg (cfg);
1577 int masked_iid_reg = alloc_preg (cfg);
1578 int iid_one_bit_reg = alloc_preg (cfg);
1579 int iid_bit_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1582 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1585 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1586 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1587 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1589 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1596 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1597 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoClass.interface_bitmap. */
1600 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1602 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1606 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1607 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoVTable.interface_bitmap. */
1610 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1612 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1616 * Emit code which checks whenever the interface id of @klass is smaller than
1617 * than the value given by max_iid_reg.
/*
 * On failure: branches to false_target if given, otherwise throws
 * InvalidCastException. Under AOT the iid must be loaded via an AOTCONST.
 */
1620 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1621 MonoBasicBlock *false_target)
1623 if (cfg->compile_aot) {
1624 int iid_reg = alloc_preg (cfg);
1625 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1626 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1633 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1636 /* Same as above, but obtains max_iid from a vtable */
1638 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1639 MonoBasicBlock *false_target)
1641 int max_iid_reg = alloc_preg (cfg);
1643 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1644 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1647 /* Same as above, but obtains max_iid from a klass */
1649 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1650 MonoBasicBlock *false_target)
1652 int max_iid_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1655 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an isinst-style subtype check: walks the supertypes table of the
 * class in klass_reg and compares the entry at klass's depth against klass
 * (as a runtime MonoInst, an AOT class const, or an immediate).
 * Branches to true_target on match; the idepth guard branches to
 * false_target when the candidate's hierarchy is too shallow.
 * NOTE(review): some lines (e.g. the compile-time idepth <= supertable-size
 * fast path) are elided in this excerpt.
 */
1659 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1661 int idepth_reg = alloc_preg (cfg);
1662 int stypes_reg = alloc_preg (cfg);
1663 int stype = alloc_preg (cfg);
1665 mono_class_setup_supertypes (klass);
1667 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1668 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1675 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1676 } else if (cfg->compile_aot) {
1677 int const_reg = alloc_preg (cfg);
1678 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1679 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper with no explicit klass MonoInst. */
1687 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1689 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast check against a vtable register: validates the
 * max interface id, then tests the interface bitmap bit. With a true_target
 * this acts as an isinst branch; without one a zero bit throws
 * InvalidCastException.
 */
1693 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1695 int intf_reg = alloc_preg (cfg);
1697 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1698 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1703 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1707 * Variant of the above that takes a register to the class, not the vtable.
1710 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1712 int intf_bit_reg = alloc_preg (cfg);
1714 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1715 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1720 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact class-equality check: compares klass_reg against either the
 * supplied klass_inst register or a runtime constant for klass, throwing
 * InvalidCastException on mismatch. (Branch selecting between the two paths
 * is elided in this excerpt.)
 */
1724 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1727 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1729 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1730 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1732 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Wrapper without an explicit klass MonoInst. */
1736 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1738 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compares klass_reg with klass (CLASSCONST under AOT, immediate otherwise)
 * and emits 'branch_op' to 'target' based on the comparison.
 */
1742 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1744 if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass recurses for arrays of arrays below. */
1755 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a castclass check of the class in klass_reg against 'klass'.
 * The visible array branch verifies rank, then checks the element (cast)
 * class, with special cases for enum-related element types and interface
 * elements; SZARRAY casts additionally verify the object has no bounds
 * (i.e. is a vector), using obj_reg (-1 skips that check, used when
 * recursing for arrays of arrays). The non-array path walks the supertypes
 * table like mini_emit_isninst_cast_inst but throws instead of branching.
 * NOTE(review): several control-flow lines are elided in this excerpt.
 */
1758 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1761 int rank_reg = alloc_preg (cfg);
1762 int eclass_reg = alloc_preg (cfg);
1764 g_assert (!klass_inst);
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1767 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1768 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1770 if (klass->cast_class == mono_defaults.object_class) {
1771 int parent_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1773 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1774 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1775 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1776 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class == mono_defaults.enum_class) {
1779 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1780 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1781 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1783 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1784 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1787 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1788 /* Check that the object is a vector too */
1789 int bounds_reg = alloc_preg (cfg);
1790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1792 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1795 int idepth_reg = alloc_preg (cfg);
1796 int stypes_reg = alloc_preg (cfg);
1797 int stype = alloc_preg (cfg);
1799 mono_class_setup_supertypes (klass);
1801 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1802 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1804 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1808 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Wrapper without an explicit klass MonoInst. */
1813 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1815 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline IR zeroing 'size' bytes at [destreg + offset] (val must be 0,
 * asserted). Small aligned sizes use a single STOREIx_MEMBASE_IMM; larger
 * regions materialize 0 in a register and store in descending power-of-two
 * chunks (8/4/2/1 bytes, gated on SIZEOF_REGISTER and unaligned-access
 * support). The while-loop headers driving the chunked stores are elided in
 * this excerpt.
 */
1819 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1823 g_assert (val == 0);
1828 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1834 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1839 #if SIZEOF_REGISTER == 8
1841 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1847 val_reg = alloc_preg (cfg);
1849 if (SIZEOF_REGISTER == 8)
1850 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1852 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1855 /* This could be optimized further if neccesary */
1857 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1864 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline IR copying 'size' bytes from [srcreg + soffset] to
 * [destreg + doffset], as descending power-of-two load/store pairs
 * (8/4/2/1 bytes) depending on alignment, register width, and unaligned
 * access support. Size is hard-capped at 10000 to bound code expansion.
 * The while-loop headers around each chunk size are elided in this excerpt.
 */
1895 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1902 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1903 g_assert (size < 10000);
1906 /* This could be optimized further if neccesary */
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1917 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1937 cur_reg = alloc_preg (cfg);
1938 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1939 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1945 cur_reg = alloc_preg (cfg);
1946 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1947 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Emits IR storing sreg1 into the TLS slot identified by tls_key.
 * Under AOT the TLS offset is a load-time constant (OP_TLS_SET_REG with the
 * offset in a register); under JIT the offset is an immediate (OP_TLS_SET).
 */
1955 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1959 if (cfg->compile_aot) {
1960 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1961 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1963 ins->sreg2 = c->dreg;
1964 MONO_ADD_INS (cfg->cbb, ins);
1966 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1968 ins->inst_offset = mini_get_tls_offset (tls_key);
1969 MONO_ADD_INS (cfg->cbb, ins);
1976 * Emit IR to push the current LMF onto the LMF stack.
/*
 * Two strategies: if the LMF itself lives in TLS, link the previous LMF and
 * store the new one directly via emit_tls_set; otherwise obtain lmf_addr
 * (via TLS intrinsics, an inlined pthread_getspecific, or the
 * mono_get_lmf_addr icall — several branch/#ifdef lines are elided in this
 * excerpt), cache it in cfg->lmf_addr_var, then splice the frame's LMF onto
 * the list head.
 */
1979 emit_push_lmf (MonoCompile *cfg)
1982 * Emit IR to push the LMF:
1983 * lmf_addr = <lmf_addr from tls>
1984 * lmf->lmf_addr = lmf_addr
1985 * lmf->prev_lmf = *lmf_addr
1988 int lmf_reg, prev_lmf_reg;
1989 MonoInst *ins, *lmf_ins;
1994 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1995 /* Load current lmf */
1996 lmf_ins = mono_get_lmf_intrinsic (cfg);
1998 MONO_ADD_INS (cfg->cbb, lmf_ins);
1999 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2000 lmf_reg = ins->dreg;
2001 /* Save previous_lmf */
2002 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2004 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2007 * Store lmf_addr in a variable, so it can be allocated to a global register.
2009 if (!cfg->lmf_addr_var)
2010 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2013 ins = mono_get_jit_tls_intrinsic (cfg);
2015 int jit_tls_dreg = ins->dreg;
2017 MONO_ADD_INS (cfg->cbb, ins);
2018 lmf_reg = alloc_preg (cfg);
2019 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2021 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2024 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2026 MONO_ADD_INS (cfg->cbb, lmf_ins);
2029 MonoInst *args [16], *jit_tls_ins, *ins;
2031 /* Inline mono_get_lmf_addr () */
2032 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2034 /* Load mono_jit_tls_id */
2035 if (cfg->compile_aot)
2036 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2038 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2039 /* call pthread_getspecific () */
2040 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2041 /* lmf_addr = &jit_tls->lmf */
2042 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2045 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2049 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2051 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2052 lmf_reg = ins->dreg;
2054 prev_lmf_reg = alloc_preg (cfg);
2055 /* Save previous_lmf */
2056 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2057 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set *lmf_addr = lmf, making this frame's LMF the new list head. */
2059 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2066 * Emit IR to pop the current LMF from the LMF stack.
/*
 * Mirror of emit_push_lmf: restores the previous LMF either directly into
 * TLS (when the LMF lives in TLS) or through the cached lmf_addr variable.
 */
2069 emit_pop_lmf (MonoCompile *cfg)
2071 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2077 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2078 lmf_reg = ins->dreg;
2080 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2081 /* Load previous_lmf */
2082 prev_lmf_reg = alloc_preg (cfg);
2083 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2085 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2088 * Emit IR to pop the LMF:
2089 * *(lmf->lmf_addr) = lmf->prev_lmf
2091 /* This could be called before emit_push_lmf () */
2092 if (!cfg->lmf_addr_var)
2093 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2094 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2096 prev_lmf_reg = alloc_preg (cfg);
2097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2098 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * Emits a profiler enter/leave icall invocation passing cfg->method, guarded
 * by MONO_PROFILE_ENTER_LEAVE. Skipped while compiling inlined methods so
 * instrumentation does not distort profiles.
 */
2103 emit_instrumentation_call (MonoCompile *cfg, void *func)
2105 MonoInst *iargs [1];
2108 * Avoid instrumenting inlined methods since it can
2109 * distort profiling results.
2111 if (cfg->method != cfg->current_method)
2114 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2115 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2116 mono_emit_jit_icall (cfg, func, iargs);
/*
 * Selects the IR call opcode for a call returning 'type', in one of three
 * forms: indirect (calli -> *_REG), virtual (virt -> *_MEMBASE), or direct.
 * Families: VOIDCALL, CALL (int/ref), LCALL (i8), RCALL (r4), FCALL (r8),
 * VCALL (valuetypes/typedbyref/gshared vtypes). Enum and genericinst types
 * loop back with the underlying/container type (goto lines elided in this
 * excerpt, as are several case labels).
 */
2121 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2124 type = mini_get_underlying_type (type);
2125 switch (type->type) {
2126 case MONO_TYPE_VOID:
2127 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2134 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 case MONO_TYPE_FNPTR:
2139 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2140 case MONO_TYPE_CLASS:
2141 case MONO_TYPE_STRING:
2142 case MONO_TYPE_OBJECT:
2143 case MONO_TYPE_SZARRAY:
2144 case MONO_TYPE_ARRAY:
2145 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2148 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2151 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2153 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2155 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2156 case MONO_TYPE_VALUETYPE:
2157 if (type->data.klass->enumtype) {
2158 type = mono_class_enum_basetype (type->data.klass);
2161 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 case MONO_TYPE_TYPEDBYREF:
2163 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2164 case MONO_TYPE_GENERICINST:
2165 type = &type->data.generic_class->container_class->byval_arg;
2168 case MONO_TYPE_MVAR:
2170 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2172 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2177 //XXX this ignores if t is byref
/* True for primitive integer scalars: BOOLEAN..U8 plus native I/U. */
2178 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2181 * target_type_is_incompatible:
2182 * @cfg: MonoCompile context
2184 * Check that the item @arg on the evaluation stack can be stored
2185 * in the target type (can be a local, or field, etc).
2186 * The cfg arg can be used to check if we need verification or just
2189 * Returns: non-0 value if arg can't be stored on a target.
/*
 * NOTE(review): return statements and several case labels are elided in this
 * excerpt; the visible logic checks stack-type compatibility per target
 * MonoType category, with special handling for byrefs, gshared vtypes and
 * partial sharing.
 */
2192 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2194 MonoType *simple_type;
/* Byref targets: accept managed pointers to the same (lowered) class,
 * native-int& targets, or a primitive byref pointing at a >= sized type. */
2197 if (target->byref) {
2198 /* FIXME: check that the pointed to types match */
2199 if (arg->type == STACK_MP) {
2200 if (cfg->verbose_level) printf ("ok\n");
2201 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2202 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2203 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2205 /* if the target is native int& or same type */
2206 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2209 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2210 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2211 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2215 if (arg->type == STACK_PTR)
2220 simple_type = mini_get_underlying_type (target);
2221 switch (simple_type->type) {
2222 case MONO_TYPE_VOID:
2230 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2234 /* STACK_MP is needed when setting pinned locals */
2235 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_FNPTR:
2242 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2243 * in native int. (#688008).
2245 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2248 case MONO_TYPE_CLASS:
2249 case MONO_TYPE_STRING:
2250 case MONO_TYPE_OBJECT:
2251 case MONO_TYPE_SZARRAY:
2252 case MONO_TYPE_ARRAY:
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 if (arg->type != STACK_I8)
2263 if (arg->type != cfg->r4_stack_type)
2267 if (arg->type != STACK_R8)
2270 case MONO_TYPE_VALUETYPE:
2271 if (arg->type != STACK_VTYPE)
2273 klass = mono_class_from_mono_type (simple_type);
2274 if (klass != arg->klass)
2277 case MONO_TYPE_TYPEDBYREF:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_GENERICINST:
2285 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2286 MonoClass *target_class;
2287 if (arg->type != STACK_VTYPE)
2289 klass = mono_class_from_mono_type (simple_type);
2290 target_class = mono_class_from_mono_type (target);
2291 /* The second cases is needed when doing partial sharing */
2292 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2296 if (arg->type != STACK_OBJ)
2298 /* FIXME: check type compatibility */
2302 case MONO_TYPE_MVAR:
2303 g_assert (cfg->gshared);
2304 if (mini_type_var_is_vt (simple_type)) {
2305 if (arg->type != STACK_VTYPE)
2308 if (arg->type != STACK_OBJ)
2313 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2319 * Prepare arguments for passing to a function call.
2320 * Return a non-zero value if the arguments can't be passed to the given
2322 * The type checks are not yet complete and some conversions may need
2323 * casts on 32 or 64 bit architectures.
2325 * FIXME: implement this using target_type_is_incompatible ()
/*
 * NOTE(review): return statements and several case labels are elided in this
 * excerpt. Visible logic: an implicit 'this' (checked first, when hasthis —
 * the guard line is elided) must be OBJ/MP/PTR; each parameter's stack type
 * must match its signature type, with enum and genericinst types looping
 * back through their underlying/container type.
 */
2328 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2330 MonoType *simple_type;
2334 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2338 for (i = 0; i < sig->param_count; ++i) {
2339 if (sig->params [i]->byref) {
2340 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2344 simple_type = mini_get_underlying_type (sig->params [i]);
2346 switch (simple_type->type) {
2347 case MONO_TYPE_VOID:
2356 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2362 case MONO_TYPE_FNPTR:
2363 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2366 case MONO_TYPE_CLASS:
2367 case MONO_TYPE_STRING:
2368 case MONO_TYPE_OBJECT:
2369 case MONO_TYPE_SZARRAY:
2370 case MONO_TYPE_ARRAY:
2371 if (args [i]->type != STACK_OBJ)
2376 if (args [i]->type != STACK_I8)
2380 if (args [i]->type != cfg->r4_stack_type)
2384 if (args [i]->type != STACK_R8)
2387 case MONO_TYPE_VALUETYPE:
2388 if (simple_type->data.klass->enumtype) {
2389 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2392 if (args [i]->type != STACK_VTYPE)
2395 case MONO_TYPE_TYPEDBYREF:
2396 if (args [i]->type != STACK_VTYPE)
2399 case MONO_TYPE_GENERICINST:
2400 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2403 case MONO_TYPE_MVAR:
2405 if (args [i]->type != STACK_VTYPE)
2409 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Maps a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Return statements are elided in this excerpt; unknown opcodes assert.
 */
2417 callvirt_to_call (int opcode)
2420 case OP_CALL_MEMBASE:
2422 case OP_VOIDCALL_MEMBASE:
2424 case OP_FCALL_MEMBASE:
2426 case OP_RCALL_MEMBASE:
2428 case OP_VCALL_MEMBASE:
2430 case OP_LCALL_MEMBASE:
2433 g_assert_not_reached ();
/*
 * Maps a *_MEMBASE (virtual) call opcode to its indirect (*_REG) counterpart;
 * unknown opcodes assert.
 */
2440 callvirt_to_call_reg (int opcode)
2443 case OP_CALL_MEMBASE:
2445 case OP_VOIDCALL_MEMBASE:
2446 return OP_VOIDCALL_REG;
2447 case OP_FCALL_MEMBASE:
2448 return OP_FCALL_REG;
2449 case OP_RCALL_MEMBASE:
2450 return OP_RCALL_REG;
2451 case OP_VCALL_MEMBASE:
2452 return OP_VCALL_REG;
2453 case OP_LCALL_MEMBASE:
2454 return OP_LCALL_REG;
2456 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *   Materialize the IMT argument for CALL: either a move of the caller-supplied
 *   IMT_ARG or a runtime constant for METHOD, then bind the resulting vreg to
 *   the architecture IMT register. The LLVM path additionally records the vreg
 *   in call->imt_arg_reg. NOTE(review): the branch structure between the LLVM
 *   and non-LLVM paths is partially elided in this view.
 */
2462 /* Either METHOD or IMT_ARG needs to be set */
2464 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2468 if (COMPILE_LLVM (cfg)) {
2470 method_reg = alloc_preg (cfg);
2471 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2473 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2474 method_reg = ins->dreg;
2478 call->imt_arg_reg = method_reg;
2480 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2485 method_reg = alloc_preg (cfg);
2486 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2488 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2489 method_reg = ins->dreg;
2492 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from the mempool MP and fill in its target;
 *   memory is owned by the pool (never freed individually).
 */
2495 static MonoJumpInfo *
2496 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2498 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2502 ji->data.target = target;
/* mini_class_check_context_used: cfg-aware wrapper around mono_class_check_context_used (). */
2508 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2511 return mono_class_check_context_used (klass);
/* mini_method_check_context_used: cfg-aware wrapper around mono_method_check_context_used (). */
2517 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2520 return mono_method_check_context_used (method);
2526 * check_method_sharing:
2528 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (both optional): *OUT_PASS_VTABLE — pass the class vtable (static or
 * valuetype methods of generic classes); *OUT_PASS_MRGCTX — pass a method RGCTX
 * (methods with an inflated method_inst). The two are mutually exclusive, see
 * the g_assert below.
 */
2531 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2533 gboolean pass_vtable = FALSE;
2534 gboolean pass_mrgctx = FALSE;
2536 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2537 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2538 gboolean sharable = FALSE;
2540 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2544 * Pass vtable iff target method might
2545 * be shared, which means that sharing
2546 * is enabled for its class and its
2547 * context is sharable (and it's not a
2550 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) take an mrgctx instead of a vtable. */
2554 if (mini_method_get_context (cmethod) &&
2555 mini_method_get_context (cmethod)->method_inst) {
2556 g_assert (!pass_vtable);
2558 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2561 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2566 if (out_pass_vtable)
2567 *out_pass_vtable = pass_vtable;
2568 if (out_pass_mrgctx)
2569 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   selecting the opcode from the return type (or OP_TAILCALL for tail calls),
 *   arranging vtype return handling, soft-float argument conversion, and
 *   letting the backend (or LLVM) lower the outgoing arguments.
 *   CALLI/VIRTUAL_/TAIL/RGCTX/UNBOX_TRAMPOLINE are flags describing the call shape.
 */
2572 inline static MonoCallInst *
2573 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2574 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2578 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so the leave-method profiler event fires here. */
2586 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2588 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2590 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2593 call->signature = sig;
2594 call->rgctx_reg = rgctx;
2595 sig_ret = mini_get_underlying_type (sig->ret);
2597 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/*
 * Valuetype return handling. NOTE(review): the two mini_type_is_vtype branches
 * below look identical because their distinguishing guard conditions are elided
 * in this view (the first presumably corresponds to the backend's vret_addr
 * path) — verify against the full file before touching.
 */
2600 if (mini_type_is_vtype (sig_ret)) {
2601 call->vret_var = cfg->vret_addr;
2602 //g_assert_not_reached ();
2604 } else if (mini_type_is_vtype (sig_ret)) {
2605 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2608 temp->backend.is_pinvoke = sig->pinvoke;
2611 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2612 * address of return value to increase optimization opportunities.
2613 * Before vtype decomposition, the dreg of the call ins itself represents the
2614 * fact the call modifies the return value. After decomposition, the call will
2615 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2616 * will be transformed into an LDADDR.
2618 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2619 loada->dreg = alloc_preg (cfg);
2620 loada->inst_p0 = temp;
2621 /* We reference the call too since call->dreg could change during optimization */
2622 loada->inst_p1 = call;
2623 MONO_ADD_INS (cfg->cbb, loada);
2625 call->inst.dreg = temp->dreg;
2627 call->vret_var = loada;
2629 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2631 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2632 if (COMPILE_SOFT_FLOAT (cfg)) {
2634 * If the call has a float argument, we would need to do an r8->r4 conversion using
2635 * an icall, but that cannot be done during the call sequence since it would clobber
2636 * the call registers + the stack. So we do it before emitting the call.
2638 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2640 MonoInst *in = call->args [i];
2642 if (i >= sig->hasthis)
2643 t = sig->params [i - sig->hasthis];
2645 t = &mono_defaults.int_class->byval_arg;
2646 t = mono_type_get_underlying_type (t);
2648 if (!t->byref && t->type == MONO_TYPE_R4) {
2649 MonoInst *iargs [1];
2653 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2655 /* The result will be in an int vreg */
2656 call->args [i] = conv;
2662 call->need_unbox_trampoline = unbox_trampoline;
/*
 * Backend lowering of out-args. NOTE(review): the duplicated mono_arch_emit_call
 * below is presumably the #else branch of an elided #ifdef ENABLE_LLVM — confirm
 * against the full file.
 */
2665 if (COMPILE_LLVM (cfg))
2666 mono_llvm_emit_call (cfg, call);
2668 mono_arch_emit_call (cfg, call);
2670 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call in the method. */
2673 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2674 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Bind RGCTX_REG (holding RGCTX_ARG) to the architecture RGCTX register for
 *   CALL and record its use on the cfg and on the call instruction.
 */
2680 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2682 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2683 cfg->uses_rgctx_reg = TRUE;
2684 call->rgctx_reg = TRUE;
2686 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes an
 *   IMT argument and/or an rgctx argument. For pinvoke wrappers with callconv
 *   checking enabled, brackets the call with OP_GET_SP/OP_SET_SP so a stack
 *   imbalance caused by a wrong calling convention is detected and reported
 *   as ExecutionEngineException instead of crashing.
 */
2690 inline static MonoInst*
2691 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2696 gboolean check_sp = FALSE;
2698 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2699 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2701 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx arg into a fresh vreg so later out-arg binding can't clobber it. */
2706 rgctx_reg = mono_alloc_preg (cfg);
2707 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Snapshot the stack pointer before the call for the imbalance check. */
2711 if (!cfg->stack_inbalance_var)
2712 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2714 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2715 ins->dreg = cfg->stack_inbalance_var->dreg;
2716 MONO_ADD_INS (cfg->cbb, ins);
2719 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2721 call->inst.sreg1 = addr->dreg;
2724 emit_imt_argument (cfg, call, NULL, imt_arg);
2726 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read SP after the call and compare against the snapshot. */
2731 sp_reg = mono_alloc_preg (cfg);
2733 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2735 MONO_ADD_INS (cfg->cbb, ins);
2737 /* Restore the stack so we don't crash when throwing the exception */
2738 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2739 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2740 MONO_ADD_INS (cfg->cbb, ins);
2742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2747 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2749 return (MonoInst*)call;
2753 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2756 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2758 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual / tail / remoting / rgctx-carrying) call to METHOD
 *   with signature SIG and arguments ARGS. THIS_INS non-NULL marks a virtual
 *   call; IMT_ARG / RGCTX_ARG are optional extra hidden arguments. Returns the
 *   call instruction. The dispatch strategy is chosen below: remoting wrapper,
 *   llvm-only virtual call, delegate Invoke fast path, devirtualized direct
 *   call (non-virtual or sealed), call through CALL_TARGET, or vtable/IMT slot.
 */
2761 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2762 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2764 #ifndef DISABLE_REMOTING
2765 gboolean might_be_remote = FALSE;
2767 gboolean virtual_ = this_ins != NULL;
2768 gboolean enable_for_aot = TRUE;
2771 MonoInst *call_target = NULL;
2773 gboolean need_unbox_trampoline;
2776 sig = mono_method_signature (method);
/* llvm-only mode never calls interface methods through this path. */
2778 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2779 g_assert_not_reached ();
/* Copy the rgctx arg into a fresh vreg so later out-arg binding can't clobber it. */
2782 rgctx_reg = mono_alloc_preg (cfg);
2783 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2786 if (method->string_ctor) {
2787 /* Create the real signature */
2788 /* FIXME: Cache these */
2789 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2790 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2795 context_used = mini_method_check_context_used (cfg, method);
2797 #ifndef DISABLE_REMOTING
2798 might_be_remote = this_ins && sig->hasthis &&
2799 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2800 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
/* Shared (gshared) remoting calls go through an indirect call to a rgctx-resolved wrapper. */
2802 if (might_be_remote && context_used) {
2805 g_assert (cfg->gshared);
2807 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2809 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2813 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2814 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2816 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2818 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2820 #ifndef DISABLE_REMOTING
2821 if (might_be_remote)
2822 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2825 call->method = method;
2826 call->inst.flags |= MONO_INST_HAS_METHOD;
2827 call->inst.inst_left = this_ins;
2828 call->tail_call = tail;
2831 int vtable_reg, slot_reg, this_reg;
2834 this_reg = this_ins->dreg;
/* Fast path for delegate Invoke: call straight through delegate->invoke_impl. */
2836 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2837 MonoInst *dummy_use;
2839 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2841 /* Make a call to delegate->invoke_impl */
2842 call->inst.inst_basereg = this_reg;
2843 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2844 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2846 /* We must emit a dummy use here because the delegate trampoline will
2847 replace the 'this' argument with the delegate target making this activation
2848 no longer a root for the delegate.
2849 This is an issue for delegates that target collectible code such as dynamic
2850 methods of GC'able assemblies.
2852 For a test case look into #667921.
2854 FIXME: a dummy use is not the best way to do it as the local register allocator
2855 will put it on a caller save register and spil it around the call.
2856 Ideally, we would either put it on a callee save register or only do the store part.
2858 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2860 return (MonoInst*)call;
2863 if ((!cfg->compile_aot || enable_for_aot) &&
2864 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2865 (MONO_METHOD_IS_FINAL (method) &&
2866 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2867 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2869 * the method is not virtual, we just need to ensure this is not null
2870 * and then we can call the method directly.
2872 #ifndef DISABLE_REMOTING
2873 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2875 * The check above ensures method is not gshared, this is needed since
2876 * gshared methods can't have wrappers.
2878 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2882 if (!method->string_ctor)
2883 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2885 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2886 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2888 * the method is virtual, but we can statically dispatch since either
2889 * it's class or the method itself are sealed.
2890 * But first we need to ensure it's not a null reference.
2892 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2894 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2895 } else if (call_target) {
2896 vtable_reg = alloc_preg (cfg);
2897 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2899 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2900 call->inst.sreg1 = call_target->dreg;
/*
 * FIX: was 'flags &= !MONO_INST_HAS_METHOD' — logical NOT yields 0 and wiped
 * ALL instruction flags. Bitwise NOT clears only the HAS_METHOD bit, the
 * intended inverse of the '|= MONO_INST_HAS_METHOD' above.
 */
2901 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2903 vtable_reg = alloc_preg (cfg);
2904 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface call: dispatch through the IMT slot (negative offsets from the vtable). */
2905 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2906 guint32 imt_slot = mono_method_get_imt_slot (method);
2907 emit_imt_argument (cfg, call, call->method, imt_arg);
2908 slot_reg = vtable_reg;
2909 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Class virtual call: dispatch through the method's vtable slot. */
2911 slot_reg = vtable_reg;
2912 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2913 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2915 g_assert (mono_method_signature (method)->generic_param_count);
2916 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 call->inst.sreg1 = slot_reg;
2921 call->inst.inst_offset = offset;
2922 call->is_virtual = TRUE;
2926 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2929 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2931 return (MonoInst*)call;
/* mono_emit_method_call: convenience wrapper — non-tail call using METHOD's own signature. */
2935 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2937 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 */
2941 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2948 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2951 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2953 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and call it through its wrapper
 *   (the wrapper provides the LMF frame needed for exception handling).
 */
2957 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2959 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2963 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2967 * mono_emit_abs_call:
2969 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The MonoJumpInfo itself is passed as the call address; PATCH_INFO_ABS resolution maps it back. */
2971 inline static MonoInst*
2972 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2973 MonoMethodSignature *sig, MonoInst **args)
2975 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2979 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* abs_patches uses pointer identity (NULL hash/equal): the ji pointer is the key. */
2982 if (cfg->abs_patches == NULL)
2983 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2984 g_hash_table_insert (cfg->abs_patches, ji, ji);
2985 ins = mono_emit_native_call (cfg, ji, sig, args);
2986 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Return a copy of SIG with one extra trailing native-int parameter, used to
 *   pass an rgctx/extra argument on calli paths.
 *   FIXME below: result is g_malloc'ed and apparently never freed (leak by design
 *   until the allocation strategy is fixed).
 */
2990 static MonoMethodSignature*
2991 sig_to_rgctx_sig (MonoMethodSignature *sig)
2993 // FIXME: memory allocation
2994 MonoMethodSignature *res;
2997 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2998 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2999 res->param_count = sig->param_count + 1;
3000 for (i = 0; i < sig->param_count; ++i)
3001 res->params [i] = sig->params [i];
/* The extra arg is typed as a native int ('this_arg' of the int class). */
3002 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3006 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Rebuild the argument list from ORIG_ARGS, append the value in ARG_REG as a
 *   trailing extra argument, extend the signature accordingly (sig_to_rgctx_sig)
 *   and emit an indirect call through CALL_TARGET. Stack-allocated args_buf is
 *   used for small signatures to avoid a mempool allocation.
 */
3008 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3010 MonoMethodSignature *csig;
3011 MonoInst *args_buf [16];
3013 int i, pindex, tmp_reg;
3015 /* Make a call with an rgctx/extra arg */
3016 if (fsig->param_count + 2 < 16)
3019 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3022 args [pindex ++] = orig_args [0];
3023 for (i = 0; i < fsig->param_count; ++i)
3024 args [pindex ++] = orig_args [fsig->hasthis + i];
3025 tmp_reg = alloc_preg (cfg);
3026 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3027 csig = sig_to_rgctx_sig (fsig);
3028 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3031 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli (llvm-only mode):
 *   ADDR points at a two-word <code address, extra arg> descriptor; load both
 *   words and call the code address passing the second word as the trailing
 *   extra argument.
 */
3033 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3035 int addr_reg, arg_reg;
3036 MonoInst *call_target;
3038 g_assert (cfg->llvm_only);
3041 * addr points to a <addr, arg> pair, load both of them, and
3042 * make a call to addr, passing arg as an extra arg.
3044 addr_reg = alloc_preg (cfg);
3045 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3046 arg_reg = alloc_preg (cfg);
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3049 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be invoked directly, skipping their managed wrapper.
 *   Disabled under non-llvmonly LLVM, with sdb sequence points, or when
 *   explicitly turned off on the cfg.
 */
3053 direct_icalls_enabled (MonoCompile *cfg)
3055 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3057 if (cfg->compile_llvm && !cfg->llvm_only)
3060 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 *   (info->no_raise) and direct icalls are enabled, the icall wrapper is
 *   inlined instead of being called, avoiding the wrapper's call overhead;
 *   otherwise fall back to a normal call through the wrapper.
 */
3066 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3069 * Call the jit icall without a wrapper if possible.
3070 * The wrapper is needed for the following reasons:
3071 * - to handle exceptions thrown using mono_raise_exceptions () from the
3072 * icall function. The EH code needs the lmf frame pushed by the
3073 * wrapper to be able to unwind back to managed code.
3074 * - to be able to do stack walks for asynchronously suspended
3075 * threads when debugging.
3077 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and publish the wrapper; the barrier orders the publication. */
3081 if (!info->wrapper_method) {
3082 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3083 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3085 mono_memory_barrier ();
3089 * Inline the wrapper method, which is basically a call to the C icall, and
3090 * an exception check.
3092 costs = inline_method (cfg, info->wrapper_method, NULL,
3093 args, NULL, il_offset, TRUE);
3094 g_assert (costs > 0);
3095 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3099 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   After a pinvoke (or LLVM) call returning a sub-register-sized integer,
 *   emit an explicit sign/zero extension of the result, since native code may
 *   leave the upper bits of the register uninitialized. Returns the (possibly
 *   replaced) result instruction.
 */
3104 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3106 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3107 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3111 * Native code might return non register sized integers
3112 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load width. */
3114 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3115 case OP_LOADI1_MEMBASE:
3116 widen_op = OP_ICONV_TO_I1;
3118 case OP_LOADU1_MEMBASE:
3119 widen_op = OP_ICONV_TO_U1;
3121 case OP_LOADI2_MEMBASE:
3122 widen_op = OP_ICONV_TO_I2;
3124 case OP_LOADU2_MEMBASE:
3125 widen_op = OP_ICONV_TO_U2;
3131 if (widen_op != -1) {
3132 int dreg = alloc_preg (cfg);
3135 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3136 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *   Emit a runtime throw of MethodAccessException: caller METHOD attempted to
 *   access inaccessible CIL_METHOD. Both methods are resolved via rgctx so the
 *   emission works under generic sharing.
 */
3147 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3149 MonoInst *args [16];
3151 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
3152 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
3154 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *   Return (and lazily cache) corlib's internal String.memcpy helper.
 *   Aborts if it is missing, which indicates a mismatched/old corlib.
 */
3158 get_memcpy_method (void)
3160 static MonoMethod *memcpy_method = NULL;
3161 if (!memcpy_method) {
3162 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3164 g_error ("Old corlib found. Install a new one");
3166 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively walk the instance fields of KLASS and set a bit in *WB_BITMAP
 *   for every pointer-sized slot that holds a reference, at byte OFFSET from
 *   the value start. Used to drive mono_gc_wbarrier_value_copy_bitmap.
 */
3170 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3172 MonoClassField *field;
3173 gpointer iter = NULL;
3175 while ((field = mono_class_get_fields (klass, &iter))) {
3178 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it for valuetypes. */
3180 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3181 if (mini_type_is_reference (mono_field_get_type (field))) {
3182 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3183 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* Embedded structs with references are flattened into the same bitmap. */
3185 MonoClass *field_class = mono_class_from_mono_type (field->type);
3186 if (field_class->has_references)
3187 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Three strategies,
 *   fastest first: a dedicated OP_CARD_TABLE_WBARRIER when the backend supports
 *   it; inline card-table marking (shift, mask, store 1 into the card byte);
 *   otherwise a call to the GC's write barrier method. No-op when write
 *   barriers are not enabled for this compile.
 */
3193 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3195 int card_table_shift_bits;
3196 gpointer card_table_mask;
3198 MonoInst *dummy_use;
3199 int nursery_shift_bits;
3200 size_t nursery_size;
3202 if (!cfg->gen_write_barriers)
3205 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3207 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3209 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3212 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3213 wbarrier->sreg1 = ptr->dreg;
3214 wbarrier->sreg2 = value->dreg;
3215 MONO_ADD_INS (cfg->cbb, wbarrier);
3216 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3217 int offset_reg = alloc_preg (cfg);
/* card index = (ptr >> shift) & mask */
3221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3222 if (card_table_mask)
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3225 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3226 * IMM's larger than 32bits.
3228 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3229 card_reg = ins->dreg;
3231 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3232 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3234 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3235 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier so the GC can see it as a root. */
3238 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of a
 *   valuetype of KLASS from iargs[1] to iargs[0]. Returns failure (falls back
 *   to the generic path) when alignment/size limits are exceeded; large copies
 *   are delegated to the mono_gc_wbarrier_value_copy_bitmap icall. Small copies
 *   are unrolled pointer-by-pointer with per-slot write barriers, then the
 *   sub-pointer-sized tail is copied with plain loads/stores.
 *   NOTE(review): return statements are elided in this view — confirm the
 *   TRUE/FALSE convention against the full file.
 */
3242 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3244 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3245 unsigned need_wb = 0;
3250 /*types with references can't have alignment smaller than sizeof(void*) */
3251 if (align < SIZEOF_VOID_P)
3254 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3255 if (size > 32 * SIZEOF_VOID_P)
3258 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3260 /* We don't unroll more than 5 stores to avoid code bloat. */
3261 if (size > 5 * SIZEOF_VOID_P) {
3262 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3263 size += (SIZEOF_VOID_P - 1);
3264 size &= ~(SIZEOF_VOID_P - 1);
3266 EMIT_NEW_ICONST (cfg, iargs [2], size);
3267 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3268 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3272 destreg = iargs [0]->dreg;
3273 srcreg = iargs [1]->dreg;
3276 dest_ptr_reg = alloc_preg (cfg);
3277 tmp_reg = alloc_preg (cfg);
/* Walk a separate dest pointer so iargs[0] can feed the write barrier each step. */
3280 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3282 while (size >= SIZEOF_VOID_P) {
3283 MonoInst *load_inst;
3284 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3285 load_inst->dreg = tmp_reg;
3286 load_inst->inst_basereg = srcreg;
3287 load_inst->inst_offset = offset;
3288 MONO_ADD_INS (cfg->cbb, load_inst);
3290 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Only slots flagged in the bitmap need a barrier (presumably guarded by need_wb bit — elided). */
3293 emit_write_barrier (cfg, iargs [0], load_inst);
3295 offset += SIZEOF_VOID_P;
3296 size -= SIZEOF_VOID_P;
3299 /*tmp += sizeof (void*)*/
3300 if (size >= SIZEOF_VOID_P) {
3301 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3302 MONO_ADD_INS (cfg->cbb, iargs [0]);
3306 /* Those cannot be references since size < sizeof (void*) */
3308 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3315 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3322 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3323 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3332 * Emit code to copy a valuetype of type @klass whose address is stored in
3333 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native (marshalled) layout/size. Under gsharedvt the size and
 * memcpy helper are fetched from the rgctx. When write barriers are needed the
 * copy goes through mono_value_copy / mono_gsharedvt_value_copy (or the inline
 * wb-aware memcpy); otherwise small copies are inlined and large ones call
 * corlib's memcpy helper.
 */
3336 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3338 MonoInst *iargs [4];
3341 MonoMethod *memcpy_method;
3342 MonoInst *size_ins = NULL;
3343 MonoInst *memcpy_ins = NULL;
3347 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3350 * This check breaks with spilled vars... need to handle it during verification anyway.
3351 * g_assert (klass && klass == src->klass && klass == dest->klass);
3354 if (mini_is_gsharedvt_klass (klass)) {
3356 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3357 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3361 n = mono_class_native_size (klass, &align);
3363 n = mono_class_value_size (klass, &align);
3365 /* if native is true there should be no references in the struct */
3366 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3367 /* Avoid barriers when storing to the stack */
3368 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3369 (dest->opcode == OP_LDADDR))) {
3375 context_used = mini_class_check_context_used (cfg, klass);
3377 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3378 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3380 } else if (context_used) {
3381 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3383 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
/* value_copy reads the class's GC descriptor; precompute it at JIT time when possible. */
3384 if (!cfg->compile_aot)
3385 mono_class_compute_gc_descriptor (klass);
3389 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3391 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline small copies, otherwise call the memcpy helper. */
3396 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3397 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3398 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3403 iargs [2] = size_ins;
3405 EMIT_NEW_ICONST (cfg, iargs [2], n);
3407 memcpy_method = get_memcpy_method ();
3409 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3411 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) corlib's internal String.memset helper.
 *   Aborts if it is missing, which indicates a mismatched/old corlib.
 */
3416 get_memset_method (void)
3418 static MonoMethod *memset_method = NULL;
3419 if (!memset_method) {
3420 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3422 g_error ("Old corlib found. Install a new one");
3424 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of KLASS at the address in DEST.
 *   Under gsharedvt the size and a bzero helper come from the rgctx; otherwise
 *   small values are zeroed inline (mini_emit_memset) and larger ones call
 *   corlib's memset helper. IP is unused in the visible code.
 */
3428 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3430 MonoInst *iargs [3];
3433 MonoMethod *memset_method;
3434 MonoInst *size_ins = NULL;
3435 MonoInst *bzero_ins = NULL;
3436 static MonoMethod *bzero_method;
3438 /* FIXME: Optimize this for the case when dest is an LDADDR */
3439 mono_class_init (klass);
3440 if (mini_is_gsharedvt_klass (klass)) {
3441 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3442 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3444 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3445 g_assert (bzero_method);
3447 iargs [1] = size_ins;
3448 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3452 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3454 n = mono_class_value_size (klass, &align);
/* Small values: inline memset of N zero bytes. */
3456 if (n <= sizeof (gpointer) * 8) {
3457 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3460 memset_method = get_memset_method ();
3462 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3463 EMIT_NEW_ICONST (cfg, iargs [2], n);
3464 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3471 * Emit IR to return either the this pointer for instance method,
3472 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *   Depending on CONTEXT_USED and the method kind, loads the rgctx source:
 *   the 'this' argument for instance methods, the mrgctx variable for generic
 *   methods, or the vtable variable for static/valuetype methods (following
 *   mrgctx->class_vtable when the method is inflated).
 */
3475 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3477 MonoInst *this_ins = NULL;
3479 g_assert (cfg->gshared);
3481 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3482 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3483 !method->klass->valuetype)
3484 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3486 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3487 MonoInst *mrgctx_loc, *mrgctx_var;
3489 g_assert (!this_ins);
3490 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3492 mrgctx_loc = mono_get_vtable_var (cfg);
3493 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3496 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3497 MonoInst *vtable_loc, *vtable_var;
3499 g_assert (!this_ins);
3501 vtable_loc = mono_get_vtable_var (cfg);
3502 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The 'vtable var' actually holds an mrgctx here; load its class_vtable field. */
3504 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3505 MonoInst *mrgctx_var = vtable_var;
3508 vtable_reg = alloc_preg (cfg);
3509 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3510 vtable_var->type = STACK_PTR;
/* Instance method: derive the vtable from the 'this' object. */
3518 vtable_reg = alloc_preg (cfg);
3519 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) a rgctx-entry patch descriptor: which METHOD's
 *   rgctx/mrgctx to use (IN_MRGCTX), the wrapped patch (PATCH_TYPE/PATCH_DATA)
 *   and the kind of info to fetch (INFO_TYPE).
 */
3524 static MonoJumpInfoRgctxEntry *
3525 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3527 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3528 res->method = method;
3529 res->in_mrgctx = in_mrgctx;
3530 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3531 res->data->type = patch_type;
3532 res->data->data.target = patch_data;
3533 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit IR which loads the value of the rgctx slot described by ENTRY from
 * the runtime-generic-context instruction RGCTX.  Two strategies are visible
 * here: a plain call to the mono_fill_{method,class}_rgctx icall (used when
 * no fastpath is possible), and an inlined fastpath which walks the chained
 * rgctx arrays and falls back to the icall only when a pointer/slot is still
 * NULL.  NOTE(review): this chunk is sampled — several control-flow lines
 * (braces, else branches, returns) are not visible, so the comments below
 * only annotate the lines that are.
 */
3538 static inline MonoInst*
3539 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3541 MonoInst *args [16];
3544 // FIXME: No fastpath since the slot is not a compile time constant
/* args [1] receives the slot index, resolved at AOT/JIT patch time */
3546 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3547 if (entry->in_mrgctx)
3548 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3550 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3554 * FIXME: This can be called during decompose, which is a problem since it creates
3556 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3558 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3560 MonoBasicBlock *is_null_bb, *end_bb;
3561 MonoInst *res, *ins, *call;
/* Decode the slot into (mrgctx?, depth-relative index) */
3564 slot = mini_get_rgctx_entry_slot (entry);
3566 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3567 index = MONO_RGCTX_SLOT_INDEX (slot);
/* The first mrgctx array lives right after the MonoMethodRuntimeGenericContext header */
3569 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many levels of indirection (chained arrays) the index needs */
3570 for (depth = 0; ; ++depth) {
3571 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3573 if (index < size - 1)
3578 NEW_BBLOCK (cfg, end_bb);
3579 NEW_BBLOCK (cfg, is_null_bb);
3582 rgctx_reg = rgctx->dreg;
3584 rgctx_reg = alloc_preg (cfg);
/* Non-mrgctx case: load the rgctx pointer out of the vtable */
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3587 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3588 NEW_BBLOCK (cfg, is_null_bb);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk one chained array per level of depth */
3594 for (i = 0; i < depth; ++i) {
3595 int array_reg = alloc_preg (cfg);
3597 /* load ptr to next array */
3598 if (mrgctx && i == 0)
3599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3602 rgctx_reg = array_reg;
3603 /* is the ptr null? */
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3605 /* if yes, jump to actual trampoline */
3606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath load of the slot value; slot 0 of each array is the link pointer,
 * hence the (index + 1) scaling */
3610 val_reg = alloc_preg (cfg);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3612 /* is the slot null? */
3613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3614 /* if yes, jump to actual trampoline */
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath hit: move the loaded value into the common result register */
3618 res_reg = alloc_preg (cfg);
3619 MONO_INST_NEW (cfg, ins, OP_MOVE);
3620 ins->dreg = res_reg;
3621 ins->sreg1 = val_reg;
3622 MONO_ADD_INS (cfg->cbb, ins);
3624 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: something was NULL, call the fill icall to compute the slot */
3627 MONO_START_BB (cfg, is_null_bb);
3629 EMIT_NEW_ICONST (cfg, args [1], index);
3631 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3633 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3634 MONO_INST_NEW (cfg, ins, OP_MOVE);
3635 ins->dreg = res_reg;
3636 ins->sreg1 = call->dreg;
3637 MONO_ADD_INS (cfg->cbb, ins);
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3640 MONO_START_BB (cfg, end_bb);
/*
 * emit_rgctx_fetch:
 *
 *   Dispatcher for rgctx fetches: either inline the fetch IR, or emit an
 * absolute call to the lazy-fetch trampoline identified by ENTRY.
 * NOTE(review): the condition selecting between the two paths (line 3655)
 * is not visible in this chunk.
 */
3649 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3652 static inline MonoInst*
3653 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3656 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* Trampoline path: RGCTX is passed by reference as the single call argument */
3658 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR which loads the rgctx data of kind RGCTX_TYPE for KLASS.
 * Builds a CLASS patch entry and fetches it through the rgctx of the
 * current method.
 */
3662 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3663 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3665 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3666 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3668 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR which loads the rgctx data of kind RGCTX_TYPE for the method
 * signature SIG, using a SIGNATURE patch entry.
 */
3672 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3673 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3675 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3676 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3678 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR which loads rgctx data of kind RGCTX_TYPE for a gsharedvt call
 * described by (SIG, CMETHOD).  The pair is packaged into a mempool-allocated
 * MonoJumpInfoGSharedVtCall used as the patch target.
 */
3682 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3683 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3685 MonoJumpInfoGSharedVtCall *call_info;
3686 MonoJumpInfoRgctxEntry *entry;
3689 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3690 call_info->sig = sig;
3691 call_info->method = cmethod;
3693 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3694 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3696 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_virt_method:
 *
 *   Emit IR which loads rgctx data of kind RGCTX_TYPE describing the virtual
 * method VIRT_METHOD as resolved against a receiver of type KLASS.  The
 * (klass, method) pair is packaged into a MonoJumpInfoVirtMethod patch target.
 */
3700 * emit_get_rgctx_virt_method:
3702 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3705 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3706 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3708 MonoJumpInfoVirtMethod *info;
3709 MonoJumpInfoRgctxEntry *entry;
3712 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3713 info->klass = klass;
3714 info->method = virt_method;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR which loads the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO data for
 * CMETHOD; INFO (caller-provided) is the gsharedvt method descriptor used as
 * the patch target.
 */
3723 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3724 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3726 MonoJumpInfoRgctxEntry *entry;
3729 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3730 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3732 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR which loads the property RGCTX_TYPE of CMETHOD.  When no generic
 * context is in use the value is a compile-time constant and is emitted
 * directly; otherwise it is fetched from the rgctx via a METHODCONST patch
 * entry.  Only METHOD and METHOD_RGCTX are valid constant kinds here — any
 * other RGCTX_TYPE in the non-shared path asserts.
 */
3736 * emit_get_rgctx_method:
3738 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3739 * normal constants, else emit a load from the rgctx.
3742 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3743 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3745 if (!context_used) {
3748 switch (rgctx_type) {
3749 case MONO_RGCTX_INFO_METHOD:
3750 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3752 case MONO_RGCTX_INFO_METHOD_RGCTX:
3753 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3756 g_assert_not_reached ();
3759 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3760 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3762 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which loads the rgctx data of kind RGCTX_TYPE for FIELD,
 * using a FIELD patch entry.
 */
3767 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3768 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3770 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3771 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3773 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry for (DATA, RGCTX_TYPE),
 * reusing an existing entry when one matches, otherwise appending a new one.
 * The entries array grows geometrically (16, then x2) out of the cfg mempool.
 * Entries of type LOCAL_OFFSET are never deduplicated.
 */
3777 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3779 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3780 MonoRuntimeGenericContextInfoTemplate *template_;
/* Linear scan for an existing matching entry */
3785 for (i = 0; i < info->num_entries; ++i) {
3786 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3788 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the array when full; old entries are copied, not freed (mempool) */
3792 if (info->num_entries == info->count_entries) {
3793 MonoRuntimeGenericContextInfoTemplate *new_entries;
3794 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3796 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3798 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3799 info->entries = new_entries;
3800 info->count_entries = new_count_entries;
/* Append the new template at the end */
3803 idx = info->num_entries;
3804 template_ = &info->entries [idx];
3805 template_->info_type = rgctx_type;
3806 template_->data = data;
3808 info->num_entries ++;
/*
 * emit_get_gsharedvt_info:
 *
 *   Emit IR which loads entry (DATA, RGCTX_TYPE) out of the per-method
 * gsharedvt info variable — a direct memory load, no rgctx fetch trampoline.
 */
3814 * emit_get_gsharedvt_info:
3816 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3819 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3824 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3825 /* Load info->entries [idx] */
3826 dreg = alloc_preg (cfg);
3827 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3833 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3835 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / class initialization for KLASS if it has
 * not run yet.  The vtable argument comes from the rgctx when generic
 * sharing is in effect, otherwise it is a vtable constant.  Backends that
 * support OP_GENERIC_CLASS_INIT get a single opcode (the call is hidden
 * inside it, so no registers are clobbered and no basic block is split);
 * otherwise an explicit initialized-bit check plus icall is emitted.
 */
3839 * On return the caller must check @klass for load errors.
3842 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3844 MonoInst *vtable_arg;
3847 context_used = mini_class_check_context_used (cfg, klass);
3850 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3851 klass, MONO_RGCTX_INFO_VTABLE);
3853 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3857 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3860 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3864 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3865 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3867 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3868 ins->sreg1 = vtable_arg->dreg;
3869 MONO_ADD_INS (cfg->cbb, ins);
/* Explicit path: test MonoVTable.initialized bitfield, skip the icall when set.
 * byte_offset/bitmask are cached across calls (computed once, < 0 sentinel). */
3871 static int byte_offset = -1;
3872 static guint8 bitmask;
3873 int bits_reg, inited_reg;
3874 MonoBasicBlock *inited_bb;
3875 MonoInst *args [16];
3877 if (byte_offset < 0)
3878 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3880 bits_reg = alloc_ireg (cfg);
3881 inited_reg = alloc_ireg (cfg);
3883 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3886 NEW_BBLOCK (cfg, inited_bb);
3888 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3889 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
/* Not initialized: call the slow icall with the vtable */
3891 args [0] = vtable_arg;
3892 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3894 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP for METHOD, but only when
 * sequence-point generation is enabled and METHOD is the method being
 * compiled (not an inlinee).  NONEMPTY_STACK flags the point as having
 * operands on the evaluation stack.
 */
3899 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3903 if (cfg->gen_seq_points && cfg->method == method) {
3904 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3906 ins->flags |= MONO_INST_NONEMPTY_STACK;
3907 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from the object's vtable) and the target KLASS into the JIT TLS area
 * (class_cast_from / class_cast_to), so a later InvalidCastException can
 * report both types.  With NULL_CHECK, a null object skips the recording.
 * No-op when better_cast_details is off.
 */
3912 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3914 if (mini_get_debug_options ()->better_cast_details) {
3915 int vtable_reg = alloc_preg (cfg);
3916 int klass_reg = alloc_preg (cfg);
3917 MonoBasicBlock *is_null_bb = NULL;
3919 int to_klass_reg, context_used;
3922 NEW_BBLOCK (cfg, is_null_bb);
3924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3925 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* TLS access is a platform intrinsic; bail out loudly where unavailable */
3928 tls_get = mono_get_jit_tls_intrinsic (cfg);
3930 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3934 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the 'from' side of the failed cast */
3935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The 'to' side needs an rgctx fetch under generic sharing */
3940 context_used = mini_class_check_context_used (cfg, klass);
3942 MonoInst *class_ins;
3944 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3945 to_klass_reg = class_ins->dreg;
3947 to_klass_reg = alloc_preg (cfg);
3948 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3953 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast details in
 * JIT TLS once the cast has succeeded.  Clearing class_cast_from alone is
 * sufficient to mark the record invalid.
 */
3958 reset_cast_details (MonoCompile *cfg)
3960 /* Reset the variables holding the cast details */
3961 if (mini_get_debug_options ()->better_cast_details) {
3962 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3964 MONO_ADD_INS (cfg->cbb, tls_get);
3965 /* It is enough to reset the from field */
3966 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ's dynamic type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for covariant array
 * stores).  The comparison strategy depends on the compilation mode:
 * shared code compares klass pointers, generic-shared code compares against
 * an rgctx-fetched vtable, AOT compares against a vtable constant, and
 * plain JIT compares against the vtable address immediate.
 */
3971 * On return the caller must check @array_class for load errors
3974 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3976 int vtable_reg = alloc_preg (cfg);
3979 context_used = mini_class_check_context_used (cfg, array_class);
3981 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load doubles as the null check on obj */
3983 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3985 if (cfg->opt & MONO_OPT_SHARED) {
3986 int class_reg = alloc_preg (cfg);
3989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3990 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3991 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3992 } else if (context_used) {
3993 MonoInst *vtable_ins;
3995 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3996 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3998 if (cfg->compile_aot) {
/* NOTE(review): the failure handling after a NULL vtable (line 4003) is
 * outside the visible lines of this chunk */
4002 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4004 vt_reg = alloc_preg (cfg);
4005 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4006 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4009 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4015 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4017 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit IR which unboxes VAL as a Nullable<T> by calling the generated
 * Nullable<T>.Unbox () method.  Under generic sharing the method address is
 * fetched from the rgctx and an indirect call is emitted (llvm-only uses
 * calli through a function pointer); otherwise a direct call is emitted,
 * passing the vtable when the target method requires one.
 */
4021 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4022 * generic code is generated.
4025 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4027 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4030 MonoInst *rgctx, *addr;
4032 /* FIXME: What if the class is shared? We might not
4033 have to get the address of the method from the
4035 addr = emit_get_rgctx_method (cfg, context_used, method,
4036 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4037 if (cfg->llvm_only) {
4038 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4040 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4042 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable argument */
4045 gboolean pass_vtable, pass_mrgctx;
4046 MonoInst *rgctx_arg = NULL;
4048 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4049 g_assert (!pass_mrgctx);
4052 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4055 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4058 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes SP [0] to KLASS: verify the object is a boxed
 * instance of the right element class (rank must be 0 — arrays cannot be
 * unboxed), throwing InvalidCastException on mismatch, then compute the
 * address of the unboxed payload (object pointer + sizeof (MonoObject)).
 * Under generic sharing the element class comes from the rgctx; otherwise
 * the class check is emitted against klass->element_class directly.
 */
4063 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4067 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4068 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4069 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4070 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4072 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
4073 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4076 /* FIXME: generics */
4077 g_assert (klass->rank == 0);
/* A boxed value can never be an array — rank must be 0 at runtime too */
4080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4081 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
4087 MonoInst *element_class;
4089 /* This assertion is from the unboxcast insn */
4090 g_assert (klass->rank == 0);
4092 element_class = emit_get_rgctx_klass (cfg, context_used,
4093 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4095 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4096 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4098 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4099 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4100 reset_cast_details (cfg);
/* Result: managed pointer to the value payload inside the box */
4103 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4104 MONO_ADD_INS (cfg->cbb, add);
4105 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR which unboxes OBJ when KLASS is a gsharedvt type whose concrete
 * representation is only known at runtime.  The runtime box-type info
 * (MONO_RGCTX_INFO_CLASS_BOX_TYPE) selects one of three paths:
 *   - value type: address = obj + sizeof (MonoObject);
 *   - reference type: store the reference into a temporary and use its address;
 *   - Nullable: call the runtime-provided Nullable unbox routine.
 * All three converge on ADDR_REG, from which the value is finally loaded.
 */
4112 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4114 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4115 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4119 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check before unboxing */
4125 args [1] = klass_inst;
4128 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4130 NEW_BBLOCK (cfg, is_ref_bb);
4131 NEW_BBLOCK (cfg, is_nullable_bb);
4132 NEW_BBLOCK (cfg, end_bb);
4133 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4140 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4141 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough: value-type case, payload sits just past the object header */
4145 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4146 MONO_ADD_INS (cfg->cbb, addr);
4148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4151 MONO_START_BB (cfg, is_ref_bb);
4153 /* Save the ref to a temporary */
4154 dreg = alloc_ireg (cfg);
4155 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4156 addr->dreg = addr_reg;
4157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4161 MONO_START_BB (cfg, is_nullable_bb);
4164 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4165 MonoInst *unbox_call;
4166 MonoMethodSignature *unbox_sig;
/* Hand-built signature: T Unbox (object) — the concrete Nullable method
 * cannot be constructed at JIT time for gsharedvt */
4168 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4169 unbox_sig->ret = &klass->byval_arg;
4170 unbox_sig->param_count = 1;
4171 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4174 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4176 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4178 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4179 addr->dreg = addr_reg;
4182 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4185 MONO_START_BB (cfg, end_bb);
/* Load the final value through the converged address */
4188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of KLASS, optionally as the first
 * half of a box operation (FOR_BOX).  The allocation strategy depends on the
 * compilation mode: generic-shared code fetches the klass/vtable from the
 * rgctx; shared (appdomain-agnostic) code calls ves_icall_object_new with an
 * explicit domain; AOT out-of-line corlib allocations use a token-indexed
 * helper to avoid relocations; otherwise the GC's managed allocator or the
 * vtable-specific allocation function is used.  Returns NULL and sets the
 * cfg exception on type-load failure.
 */
4194 * Returns NULL and set the cfg exception on error.
4197 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4199 MonoInst *iargs [2];
/* Generic-sharing path: klass/vtable comes from the rgctx */
4204 MonoRgctxInfoType rgctx_info;
4205 MonoInst *iargs [2];
4206 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4208 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4210 if (cfg->opt & MONO_OPT_SHARED)
4211 rgctx_info = MONO_RGCTX_INFO_KLASS;
4213 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4214 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4216 if (cfg->opt & MONO_OPT_SHARED) {
4217 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4219 alloc_ftn = ves_icall_object_new;
4222 alloc_ftn = ves_icall_object_new_specific;
4225 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4226 if (known_instance_size) {
4227 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header */
4228 if (size < sizeof (MonoObject))
4229 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4231 EMIT_NEW_ICONST (cfg, iargs [1], size);
4233 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4236 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-generic-shared paths */
4239 if (cfg->opt & MONO_OPT_SHARED) {
4240 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4241 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4243 alloc_ftn = ves_icall_object_new;
4244 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4245 /* This happens often in argument checking code, eg. throw new FooException... */
4246 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4247 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4248 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4250 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4251 MonoMethod *managed_alloc = NULL;
/* Type-load failure: report through the cfg, caller sees NULL */
4255 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4256 cfg->exception_ptr = klass;
4260 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4262 if (managed_alloc) {
4263 int size = mono_class_instance_size (klass);
4264 if (size < sizeof (MonoObject))
4265 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4267 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4268 EMIT_NEW_ICONST (cfg, iargs [1], size);
4269 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4271 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in gpointer words */
4273 guint32 lw = vtable->klass->instance_size;
4274 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4275 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4276 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4279 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4283 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL into a new object of KLASS.  Three major cases:
 *   - Nullable<T>: call the generated Nullable<T>.Box () method (through the
 *     rgctx under generic sharing, directly otherwise);
 *   - gsharedvt KLASS: branch at runtime on the class box type — value types
 *     are allocated and copied, reference types are passed through unchanged,
 *     Nullables call the runtime Box routine via a hand-built signature;
 *   - ordinary value type: allocate and store the value after the header.
 * Returns NULL and sets the cfg exception on allocation/type-load error.
 */
4287 * Returns NULL and set the cfg exception on error.
4290 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4292 MonoInst *alloc, *ins;
4294 if (mono_class_is_nullable (klass)) {
4295 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4298 if (cfg->llvm_only && cfg->gsharedvt) {
4299 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4300 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4301 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4303 /* FIXME: What if the class is shared? We might not
4304 have to get the method address from the RGCTX. */
4305 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4306 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4307 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4309 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, optionally with a vtable argument */
4312 gboolean pass_vtable, pass_mrgctx;
4313 MonoInst *rgctx_arg = NULL;
4315 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4316 g_assert (!pass_mrgctx);
4319 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4322 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4325 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4329 if (mini_is_gsharedvt_klass (klass)) {
4330 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4331 MonoInst *res, *is_ref, *src_var, *addr;
4334 dreg = alloc_ireg (cfg);
4336 NEW_BBLOCK (cfg, is_ref_bb);
4337 NEW_BBLOCK (cfg, is_nullable_bb);
4338 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of the (gsharedvt) class */
4339 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4344 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: value-type case — allocate, copy the value in, return obj */
4347 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4350 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4351 ins->opcode = OP_STOREV_MEMBASE;
4353 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4354 res->type = STACK_OBJ;
4356 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4359 MONO_START_BB (cfg, is_ref_bb);
4361 /* val is a vtype, so has to load the value manually */
4362 src_var = get_vreg_to_inst (cfg, val->dreg);
4364 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4365 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4370 MONO_START_BB (cfg, is_nullable_bb);
4373 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4374 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4376 MonoMethodSignature *box_sig;
4379 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4380 * construct that method at JIT time, so have to do things by hand.
4382 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4383 box_sig->ret = &mono_defaults.object_class->byval_arg;
4384 box_sig->param_count = 1;
4385 box_sig->params [0] = &klass->byval_arg;
4388 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4390 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4391 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4392 res->type = STACK_OBJ;
4396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4398 MONO_START_BB (cfg, end_bb);
/* Plain value-type box: allocate and store the payload after the header */
4402 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4406 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, under generic sharing,
 * an open generic type) with at least one variant (co- or contravariant)
 * type argument instantiated with a reference type.  Such casts need the
 * slower cache-based isinst/castclass helpers.
 */
4412 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4415 MonoGenericContainer *container;
4416 MonoGenericInst *ginst;
4418 if (klass->generic_class) {
4419 container = klass->generic_class->container_class->generic_container;
4420 ginst = klass->generic_class->context.class_inst;
4421 } else if (klass->generic_container && context_used) {
4422 container = klass->generic_container;
4423 ginst = container->context.class_inst;
/* Scan the variant parameters for reference-type instantiations */
4428 for (i = 0; i < container->type_argc; ++i) {
4430 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4432 type = ginst->type_argv [i];
4433 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib type names whose icalls are known not to
 * raise exceptions and can therefore be called directly. */
4439 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) may be invoked with a direct call,
 * bypassing the usual wrapper.  Only allowed when direct icalls are enabled
 * for this cfg and the icall is on the whitelist (System.Math plus a few
 * corlib types), since direct calls must never reach mono_raise_exception ().
 */
4442 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4444 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4445 if (!direct_icalls_enabled (cfg))
4449 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4450 * Whitelist a few icalls for now.
4452 if (!direct_icall_type_hash) {
4453 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4455 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4456 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4457 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4458 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible */
4459 mono_memory_barrier ();
4460 direct_icall_type_hash = h;
4463 if (cmethod->klass == mono_defaults.math_class)
4465 /* No locking needed */
4466 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided with a simple
 * vtable/klass comparison (interfaces, arrays, Nullable, MarshalByRef, sealed
 * classes, and open type variables) and needs the cache-based helper path. */
4471 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the generated isinst-with-cache wrapper; ARGS is
 * (obj, klass, cache-slot). */
4474 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4476 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4477 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/* Emit a call to the generated castclass-with-cache wrapper; cast details
 * are recorded before the call and cleared afterwards so a failed cast can
 * report both classes. */
4481 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4483 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4486 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4487 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4488 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a call-site-unique index for a CASTCLASS_CACHE patch, combining
 * the method index (high 16 bits) with a per-cfg counter (low 16 bits).
 */
4494 get_castclass_cache_idx (MonoCompile *cfg)
4496 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4497 cfg->castclass_cache_index ++;
4498 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_isinst_with_cache_nonshared:
 *
 *   Non-generic-shared isinst through the cache wrapper: KLASS is a constant
 * and the per-call-site cache slot is a runtime constant patch.
 */
4503 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4508 args [0] = obj; /* obj */
4509 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4511 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4512 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4514 return emit_isinst_with_cache (cfg, klass, args);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-generic-shared castclass through the cache wrapper: KLASS is a
 * constant and the per-call-site cache slot is a runtime constant patch.
 */
4518 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4527 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4530 idx = get_castclass_cache_idx (cfg);
4531 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4533 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4534 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL castclass of SRC to KLASS.  A known-null constant
 * passes through unchanged.  Complex targets (interfaces, arrays, variant
 * generics, etc.) go through the cache wrapper; simple targets get an
 * inline null-check plus vtable/klass comparison, with sealed classes
 * optimized to a single vtable compare where safe.  Throws
 * InvalidCastException on mismatch; cast details are recorded for
 * --debug=casts.  Returns NULL and sets the cfg exception on error.
 */
4538 * Returns NULL and set the cfg exception on error.
4541 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4543 MonoBasicBlock *is_null_bb;
4544 int obj_reg = src->dreg;
4545 int vtable_reg = alloc_preg (cfg);
4546 MonoInst *klass_inst = NULL;
/* Casting a compile-time null always succeeds */
4548 if (src->opcode == OP_PCONST && src->inst_p0 == 0)
4554 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4555 MonoInst *cache_ins;
4557 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4562 /* klass - it's the second element of the cache entry*/
4563 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4566 args [2] = cache_ins;
4568 return emit_castclass_with_cache (cfg, klass, args);
4571 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime null check: null skips the type test entirely */
4574 NEW_BBLOCK (cfg, is_null_bb);
4576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4579 save_cast_details (cfg, klass, obj_reg, FALSE);
4581 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4583 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4585 int klass_reg = alloc_preg (cfg);
4587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: dynamic type must be exactly KLASS, a single compare suffices */
4589 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4590 /* the remoting code is broken, access the class for now */
4591 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4592 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4594 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4595 cfg->exception_ptr = klass;
4598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4603 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4606 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4610 MONO_START_BB (cfg, is_null_bb);
4612 reset_cast_details (cfg);
4618 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' CIL opcode for KLASS applied to SRC:
 * the result register holds the object itself when the check succeeds and
 * NULL (0) when it fails.  CONTEXT_USED is nonzero for generic-shared code,
 * in which case the class is fetched through the RGCTX instead of being
 * embedded as a constant.
 * NOTE(review): this excerpt elides lines of the original body (declarations,
 * some braces and the final return), so the control-flow nesting shown here
 * is partial.
 */
4621 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4624 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4625 int obj_reg = src->dreg;
4626 int vtable_reg = alloc_preg (cfg);
4627 int res_reg = alloc_ireg_ref (cfg);
4628 MonoInst *klass_inst = NULL;
/* Variant generic interfaces and other "complex" checks: defer to the
 * runtime cast-cache helper instead of emitting the test inline. */
4633 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4634 MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4636 args [0] = src; /* obj */
4638 /* klass - it's the second element of the cache entry*/
4639 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins; /* cache */
4642 return emit_isinst_with_cache (cfg, klass, args);
/* Shared code: materialize the class through the RGCTX (presumably only
 * reached when context_used != 0 — the guarding branch is elided here). */
4645 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4648 NEW_BBLOCK (cfg, is_null_bb);
4649 NEW_BBLOCK (cfg, false_bb);
4650 NEW_BBLOCK (cfg, end_bb);
4652 /* Do the assignment at the beginning, so the other assignment can be if converted */
4653 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4654 ins->type = STACK_OBJ;
/* NULL input: jump straight to is_null_bb, which leaves res_reg == obj_reg
 * (i.e. NULL), matching the CIL semantics of isinst on null. */
4657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4658 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4662 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4663 g_assert (!context_used);
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
/* Array case: compare ranks first, then check the element (cast) class. */
4667 int klass_reg = alloc_preg (cfg);
4670 int rank_reg = alloc_preg (cfg);
4671 int eclass_reg = alloc_preg (cfg);
4673 g_assert (!context_used);
4674 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes involving System.Enum/System.Object, where
 * enum/underlying-type compatibility widens the normal subtype check. */
4679 if (klass->cast_class == mono_defaults.object_class) {
4680 int parent_reg = alloc_preg (cfg);
4681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4682 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4683 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4685 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4686 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4689 } else if (klass->cast_class == mono_defaults.enum_class) {
4690 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4692 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4693 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4695 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4696 /* Check that the object is a vector too */
4697 int bounds_reg = alloc_preg (cfg);
4698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4703 /* the is_null_bb target simply copies the input register to the output */
4704 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4706 } else if (mono_class_is_nullable (klass)) {
4707 g_assert (!context_used);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4709 /* the is_null_bb target simply copies the input register to the output */
4710 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-shared class fast path: a single vtable/class identity compare
 * suffices (no hierarchy walk). */
4712 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4713 g_assert (!context_used);
4714 /* the remoting code is broken, access the class for now */
4715 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4716 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4718 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4719 cfg->exception_ptr = klass;
4722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: full subtype check, parameterized by klass_inst for shared code. */
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4731 /* the is_null_bb target simply copies the input register to the output */
4732 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: overwrite the result with NULL. */
4737 MONO_START_BB (cfg, false_bb);
4739 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Success/null path: res_reg already holds the input object. */
4742 MONO_START_BB (cfg, is_null_bb);
4744 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (the 0/1/2 contract is
 * spelled out in the comment below).  The #ifndef DISABLE_REMOTING sections
 * add transparent-proxy handling; without remoting the proxy outcome (2) is
 * unreachable and hitting it aborts via g_error ().
 * NOTE(review): this excerpt elides lines of the original body (some
 * declarations, #else/#endif lines and the final return), so the
 * preprocessor/brace nesting shown here is partial.
 */
4750 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4752 /* This opcode takes as input an object reference and a class, and returns:
4753 0) if the object is an instance of the class,
4754 1) if the object is not instance of the class,
4755 2) if the object is a proxy whose type cannot be determined */
4758 #ifndef DISABLE_REMOTING
4759 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4761 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4763 int obj_reg = src->dreg;
4764 int dreg = alloc_ireg (cfg);
4766 #ifndef DISABLE_REMOTING
4767 int klass_reg = alloc_preg (cfg);
4770 NEW_BBLOCK (cfg, true_bb);
4771 NEW_BBLOCK (cfg, false_bb);
4772 NEW_BBLOCK (cfg, end_bb);
4773 #ifndef DISABLE_REMOTING
4774 NEW_BBLOCK (cfg, false2_bb);
4775 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance" (result 1). */
4778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4781 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4782 #ifndef DISABLE_REMOTING
4783 NEW_BBLOCK (cfg, interface_fail_bb);
4786 tmp_reg = alloc_preg (cfg);
4787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4788 #ifndef DISABLE_REMOTING
/* Interface + remoting: on a failed interface check, fall through to see
 * whether the object is a transparent proxy with custom type info. */
4789 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4790 MONO_START_BB (cfg, interface_fail_bb);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4793 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4795 tmp_reg = alloc_preg (cfg);
4796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy with custom type info: type cannot be decided here (result 2). */
4798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4800 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4803 #ifndef DISABLE_REMOTING
/* Non-interface + remoting: detect transparent proxies and test against the
 * remote class's proxy_class instead of the proxy's own class. */
4804 tmp_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4808 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4809 tmp_reg = alloc_preg (cfg);
4810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4813 tmp_reg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4818 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4821 MONO_START_BB (cfg, no_proxy_bb);
4823 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4825 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
4829 MONO_START_BB (cfg, false_bb);
4831 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4834 #ifndef DISABLE_REMOTING
4835 MONO_START_BB (cfg, false2_bb);
4837 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4841 MONO_START_BB (cfg, true_bb);
4843 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4845 MONO_START_BB (cfg, end_bb);
/* Materialize the integer result as the returned instruction. */
4848 MONO_INST_NEW (cfg, ins, OP_ICONST);
4850 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant; the 0/1/throw contract
 * is described in the comment below.  With DISABLE_REMOTING defined the proxy
 * outcome (1) is unreachable and hitting that path aborts via g_error ().
 * NOTE(review): this excerpt elides lines of the original body (some
 * #else/#endif lines, braces and the final return), so the nesting shown
 * here is partial.
 */
4856 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4858 /* This opcode takes as input an object reference and a class, and returns:
4859 0) if the object is an instance of the class,
4860 1) if the object is a proxy whose type cannot be determined
4861 an InvalidCastException exception is thrown otherwhise*/
4864 #ifndef DISABLE_REMOTING
4865 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4867 MonoBasicBlock *ok_result_bb;
4869 int obj_reg = src->dreg;
4870 int dreg = alloc_ireg (cfg);
4871 int tmp_reg = alloc_preg (cfg);
4873 #ifndef DISABLE_REMOTING
4874 int klass_reg = alloc_preg (cfg);
4875 NEW_BBLOCK (cfg, end_bb);
4878 NEW_BBLOCK (cfg, ok_result_bb);
/* A null object always casts successfully (result 0). */
4880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a descriptive exception. */
4883 save_cast_details (cfg, klass, obj_reg, FALSE);
4885 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4886 #ifndef DISABLE_REMOTING
4887 NEW_BBLOCK (cfg, interface_fail_bb);
4889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface + remoting: a failed interface check may still be a transparent
 * proxy; anything else must be the proxy class or an exception is raised. */
4890 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4891 MONO_START_BB (cfg, interface_fail_bb);
4892 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4894 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4896 tmp_reg = alloc_preg (cfg);
4897 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4899 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: cast cannot be decided here (result 1). */
4901 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Interface without remoting: plain interface cast, throwing on failure. */
4904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4905 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4909 #ifndef DISABLE_REMOTING
4910 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface + remoting: detect transparent proxies and test against the
 * remote class's proxy_class instead of the proxy's own class. */
4912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4914 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4916 tmp_reg = alloc_preg (cfg);
4917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4920 tmp_reg = alloc_preg (cfg);
4921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4925 NEW_BBLOCK (cfg, fail_1_bb);
4927 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4929 MONO_START_BB (cfg, fail_1_bb);
4931 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4934 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: full castclass check, throwing InvalidCastException
 * inside the helper on failure. */
4936 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4938 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4942 MONO_START_BB (cfg, ok_result_bb);
4944 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4946 #ifndef DISABLE_REMOTING
4947 MONO_START_BB (cfg, end_bb);
/* Materialize the integer result as the returned instruction. */
4951 MONO_INST_NEW (cfg, ins, OP_ICONST);
4953 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inlined Enum.HasFlag (): load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result back against ENUM_FLAG,
 * i.e. compute (this & flag) == flag.  32-bit vs 64-bit opcodes are chosen
 * from the enum's underlying type (the is_i4 selection is elided in this
 * excerpt).  The final decompose calls lower the long/compare ops for
 * targets that need it.
 */
4958 static G_GNUC_UNUSED MonoInst*
4959 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4961 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4962 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4965 switch (enum_type->type) {
4968 #if SIZEOF_REGISTER == 8
4980 MonoInst *load, *and_, *cmp, *ceq;
4981 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4982 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int dest_reg = alloc_ireg (cfg);
4985 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4986 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4987 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4988 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4990 ceq->type = STACK_I4;
/* Lower the composite opcodes for backends that cannot handle them directly. */
4993 load = mono_decompose_opcode (cfg, load);
4994 and_ = mono_decompose_opcode (cfg, and_);
4995 cmp = mono_decompose_opcode (cfg, cmp);
4996 ceq = mono_decompose_opcode (cfg, ceq);
5004 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate constructor: allocate the delegate object for
 * KLASS and fill in its target, method, cached-code slot and invoke_impl
 * fields, mirroring what mono_delegate_ctor () does at runtime.  VIRTUAL_
 * selects the virtual-delegate trampoline; CONTEXT_USED is nonzero for
 * generic-shared code.
 * NOTE(review): this excerpt elides lines of the original body (some
 * declarations, braces and the final return).
 */
5006 static G_GNUC_UNUSED MonoInst*
5007 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5011 gpointer trampoline;
5012 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need an arch-specific invoke thunk; bail out (presumably
 * falling back to the non-inlined path) when none is available. */
5016 if (virtual_ && !cfg->llvm_only) {
5017 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5020 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5024 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5028 /* Inline the contents of mono_delegate_ctor */
5030 /* Set target field */
5031 /* Optimize away setting of NULL target */
5032 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5033 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store writes a GC reference into the heap object, so the
 * collector's write barrier must be emitted when barriers are enabled. */
5034 if (cfg->gen_write_barriers) {
5035 dreg = alloc_preg (cfg);
5036 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5037 emit_write_barrier (cfg, ptr, target);
5041 /* Set method field */
5042 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5046 * To avoid looking up the compiled code belonging to the target method
5047 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5048 * store it, and we fill it after the method has been compiled.
5050 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5051 MonoInst *code_slot_ins;
5054 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain method -> code-slot hash under the
 * domain lock; each method gets one shared slot. */
5056 domain = mono_domain_get ();
5057 mono_domain_lock (domain);
5058 if (!domain_jit_info (domain)->method_code_hash)
5059 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5060 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5062 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5063 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5065 mono_domain_unlock (domain);
5067 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode has no trampolines: initialize the delegate via icalls. */
5072 if (cfg->llvm_only) {
5073 MonoInst *args [16];
5078 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5079 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5082 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: reference the trampoline through patch info so it can be resolved at
 * load time; JIT: create the trampoline (or its info) right away. */
5088 if (cfg->compile_aot) {
5089 MonoDelegateClassMethodPair *del_tramp;
5091 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5092 del_tramp->klass = klass;
5093 del_tramp->method = context_used ? NULL : method;
5094 del_tramp->is_virtual = virtual_;
5095 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5098 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5100 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5101 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5104 /* Set invoke_impl field */
5106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path (presumably): dereference the tramp-info struct for the
 * actual invoke_impl/method_ptr values — the guarding branch is elided here. */
5108 dreg = alloc_preg (cfg);
5109 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5112 dreg = alloc_preg (cfg);
5113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5117 dreg = alloc_preg (cfg);
5118 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5119 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5121 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newobj' on a multi-dimensional array ctor:
 * the RANK lengths on SP are passed to the vararg mono_array_new_va icall.
 * Marks the method as having varargs and disables LLVM compilation, since
 * the vararg calling convention used here is not supported there.
 */
5127 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5129 MonoJitICallInfo *info;
5131 /* Need to register the icall so it gets an icall wrapper */
5132 info = mono_get_array_new_va_icall (rank);
5134 cfg->flags |= MONO_CFG_HAS_VARARGS;
5136 /* mono_array_new_va () needs a vararg calling convention */
5137 cfg->exception_message = g_strdup ("array-new");
5138 cfg->disable_llvm = TRUE;
5140 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5141 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5145 * handle_constrained_gsharedvt_call:
5147 * Handle constrained calls where the receiver is a gsharedvt type.
5148 * Return the instruction representing the call. Set the cfg exception on failure.
5151 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5152 gboolean *ref_emit_widen)
5154 MonoInst *ins = NULL;
5155 gboolean emit_widen = *ref_emit_widen;
5158 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5159 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5160 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of shapes is supported: calls on object/interfaces (or
 * non-corlib reference types) with simple return types and at most one
 * simple argument.  Everything else falls through to GSHAREDVT_FAILURE. */
5162 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5163 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5164 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5165 MonoInst *args [16];
5168 * This case handles calls to
5169 * - object:ToString()/Equals()/GetHashCode(),
5170 * - System.IComparable<T>:CompareTo()
5171 * - System.IEquatable<T>:Equals ()
5172 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] is the method, fetched via the RGCTX when it is generic-shared. */
5176 if (mono_method_check_context_used (cmethod))
5177 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5179 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5180 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5182 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5183 if (fsig->hasthis && fsig->param_count) {
5184 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5185 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5186 ins->dreg = alloc_preg (cfg);
5187 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5188 MONO_ADD_INS (cfg->cbb, ins);
5191 if (mini_is_gsharedvt_type (fsig->params [0])) {
5192 int addr_reg, deref_arg_reg;
/* A gsharedvt argument is passed by address; args[3] tells the icall
 * whether the callee must dereference it (ref-instantiated case). */
5194 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5195 deref_arg_reg = alloc_preg (cfg);
5196 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5197 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5199 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5200 addr_reg = ins->dreg;
5201 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5203 EMIT_NEW_ICONST (cfg, args [3], 0);
5204 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: pass zeros for the deref flag and the argument array. */
5207 EMIT_NEW_ICONST (cfg, args [3], 0);
5208 EMIT_NEW_ICONST (cfg, args [4], 0);
5210 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it according to the return type. */
5213 if (mini_is_gsharedvt_type (fsig->ret)) {
5214 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5215 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header and load the unboxed value. */
5219 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5220 MONO_ADD_INS (cfg->cbb, add);
5222 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5223 MONO_ADD_INS (cfg->cbb, ins);
5224 /* ins represents the call result */
5227 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5230 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the method's GOT
 * variable, placing it at the very start of the entry basic block, and add a
 * dummy use in the exit block so the variable stays live for the whole
 * method.  Does nothing if there is no GOT variable or it was already
 * allocated.
 */
5239 mono_emit_load_got_addr (MonoCompile *cfg)
5241 MonoInst *getaddr, *dummy_use;
5243 if (!cfg->got_var || cfg->got_var_allocated)
5246 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5247 getaddr->cil_code = cfg->header->code;
5248 getaddr->dreg = cfg->got_var->dreg;
5250 /* Add it to the start of the first bblock */
5251 if (cfg->bb_entry->code) {
5252 getaddr->next = cfg->bb_entry->code;
5253 cfg->bb_entry->code = getaddr;
5256 MONO_ADD_INS (cfg->bb_entry, getaddr);
5258 cfg->got_var_allocated = TRUE;
5261 * Add a dummy use to keep the got_var alive, since real uses might
5262 * only be generated by the back ends.
5263 * Add it to end_bblock, so the variable's lifetime covers the whole
5265 * It would be better to make the usage of the got var explicit in all
5266 * cases when the backend needs it (i.e. calls, throw etc.), so this
5267 * wouldn't be needed.
5269 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5270 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit (from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT),
 * initialized lazily by mono_method_check_inlining (). */
5273 static int inline_limit;
5274 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Checks, in order: explicit disables and inline depth,
 * method attributes (NoInlining/Synchronized/MarshalByRef), body size
 * against the MONO_INLINELIMIT/INLINE_LENGTH_LIMIT threshold (waived by
 * AggressiveInlining), static-constructor constraints, soft-float R4
 * restrictions, and the cfg's dont_inline list.
 * NOTE(review): this excerpt elides lines of the original body, including
 * the return statements between checks.
 */
5277 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5279 MonoMethodHeaderSummary header;
5281 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5282 MonoMethodSignature *sig = mono_method_signature (method);
5286 if (cfg->disable_inline)
5291 if (cfg->inline_depth > 10)
5294 if (!mono_method_get_header_summary (method, &header))
5297 /*runtime, icall and pinvoke are checked by summary call*/
5298 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5299 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5300 (mono_class_is_marshalbyref (method->klass)) ||
5304 /* also consider num_locals? */
5305 /* Do the size check early to avoid creating vtables */
5306 if (!inline_limit_inited) {
5307 if (g_getenv ("MONO_INLINELIMIT"))
5308 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5310 inline_limit = INLINE_LENGTH_LIMIT;
5311 inline_limit_inited = TRUE;
5313 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5317 * if we can initialize the class of the method right away, we do,
5318 * otherwise we don't allow inlining if the class needs initialization,
5319 * since it would mean inserting a call to mono_runtime_class_init()
5320 * inside the inlined code
5322 if (!(cfg->opt & MONO_OPT_SHARED)) {
5323 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5324 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5325 vtable = mono_class_vtable (cfg->domain, method->klass);
5328 if (!cfg->compile_aot) {
5330 if (!mono_runtime_class_init_full (vtable, &error)) {
5331 mono_error_cleanup (&error);
5335 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5336 if (cfg->run_cctors && method->klass->has_cctor) {
5337 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5338 if (!method->klass->runtime_info)
5339 /* No vtable created yet */
5341 vtable = mono_class_vtable (cfg->domain, method->klass);
5344 /* This makes so that inline cannot trigger */
5345 /* .cctors: too many apps depend on them */
5346 /* running with a specific order... */
5347 if (! vtable->initialized)
5350 if (!mono_runtime_class_init_full (vtable, &error)) {
5351 mono_error_cleanup (&error);
5355 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5356 if (!method->klass->runtime_info)
5357 /* No vtable created yet */
5359 vtable = mono_class_vtable (cfg->domain, method->klass);
5362 if (!vtable->initialized)
5367 * If we're compiling for shared code
5368 * the cctor will need to be run at aot method load time, for example,
5369 * or at the end of the compilation of the inlining method.
5371 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking or returning R4. */
5375 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5376 if (mono_arch_is_soft_float ()) {
5378 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5380 for (i = 0; i < sig->param_count; ++i)
5381 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5386 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires KLASS's class
 * constructor to be triggered first.  Already-initialized vtables (JIT only),
 * BeforeFieldInit self-access, classes that need no cctor, and instance
 * methods of the declaring class itself (cctor guaranteed to have run) are
 * exempt.  NOTE(review): the return statements between checks are elided in
 * this excerpt.
 */
5393 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5395 if (!cfg->compile_aot) {
5397 if (vtable->initialized)
5401 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5402 if (cfg->method == method)
5406 if (!mono_class_needs_cctor_run (klass, method))
5409 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5410 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR of element class KLASS, with a bounds check when BCHECK is set.
 * Uses an x86/amd64 LEA fast path for power-of-two element sizes, and the
 * RGCTX-provided element size for gsharedvt variable-size element types.
 */
5417 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5421 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5424 if (mini_is_gsharedvt_variable_klass (klass)) {
5427 mono_class_init (klass);
5428 size = mono_class_array_element_size (klass);
5431 mult_reg = alloc_preg (cfg);
5432 array_reg = arr->dreg;
5433 index_reg = index->dreg;
5435 #if SIZEOF_REGISTER == 8
5436 /* The array reg is 64 bits but the index reg is only 32 */
5437 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself; otherwise sign-extend the index. */
5439 index2_reg = index_reg;
5441 index2_reg = alloc_preg (cfg);
5442 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit registers: narrow an I8 index down to 32 bits. */
5445 if (index->type == STACK_I8) {
5446 index2_reg = alloc_preg (cfg);
5447 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5449 index2_reg = index_reg;
5454 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5456 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: a single LEA with a shift covers sizes 1/2/4/8. */
5457 if (size == 1 || size == 2 || size == 4 || size == 8) {
5458 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5460 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5461 ins->klass = mono_class_get_element_class (klass);
5462 ins->type = STACK_MP;
5468 add_reg = alloc_ireg_mp (cfg);
/* Gsharedvt: the element size is only known at runtime, so multiply by the
 * value fetched from the RGCTX instead of an immediate. */
5471 MonoInst *rgctx_ins;
5474 g_assert (cfg->gshared);
5475 context_used = mini_class_check_context_used (cfg, klass);
5476 g_assert (context_used);
5477 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5478 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
5482 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5483 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5484 ins->klass = mono_class_get_element_class (klass);
5485 ins->type = STACK_MP;
5486 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] in a
 * two-dimensional array ARR of element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the bounds descriptors.
 */
5492 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5494 int bounds_reg = alloc_preg (cfg);
5495 int add_reg = alloc_ireg_mp (cfg);
5496 int mult_reg = alloc_preg (cfg);
5497 int mult2_reg = alloc_preg (cfg);
5498 int low1_reg = alloc_preg (cfg);
5499 int low2_reg = alloc_preg (cfg);
5500 int high1_reg = alloc_preg (cfg);
5501 int high2_reg = alloc_preg (cfg);
5502 int realidx1_reg = alloc_preg (cfg);
5503 int realidx2_reg = alloc_preg (cfg);
5504 int sum_reg = alloc_preg (cfg);
5505 int index1, index2, tmpreg;
5509 mono_class_init (klass);
5510 size = mono_class_array_element_size (klass);
5512 index1 = index_ins1->dreg;
5513 index2 = index_ins2->dreg;
5515 #if SIZEOF_REGISTER == 8
5516 /* The array reg is 64 bits but the index reg is only 32 */
5517 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes before 64-bit address arithmetic. */
5520 tmpreg = alloc_preg (cfg);
5521 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5523 tmpreg = alloc_preg (cfg);
5524 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5528 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5532 /* range checking */
5533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5534 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; require realidx1 < length
 * (unsigned compare also catches negative adjusted indexes). */
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5537 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5540 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5542 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second bounds descriptor. */
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5545 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5546 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5547 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5548 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5549 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5550 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2 + realidx2) * size) + vector offset */
5552 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5553 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5556 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5558 ins->type = STACK_MP;
5560 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Compute the address of an array element for a (Get/Set/Address) array
 * accessor method CMETHOD. SP holds the array followed by the indexes;
 * for setters the trailing value argument is excluded from the rank.
 * Fast paths: rank 1 and (visible below) rank 2; otherwise falls back to a
 * marshalled Array.Address helper call.
 * NOTE(review): some guard lines are elided in this chunk (e.g. the rank == 1
 * check presumably precedes the first return — confirm against full source).
 */
5566 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5570 MonoMethod *addr_method;
5572 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the last parameter is the value being stored, not an index. */
5574 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5577 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5579 /* emit_ldelema_2 depends on OP_LMUL */
5580 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5581 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5584 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the generated Array address helper. */
5587 element_size = mono_class_array_element_size (eclass);
5588 addr_method = mono_marshal_get_array_address (rank, element_size);
5589 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
5594 static MonoBreakPolicy
5595 always_insert_breakpoint (MonoMethod *method)
5597 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5600 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5603 * mono_set_break_policy:
5604 * policy_callback: the new callback function
5606 * Allow embedders to decide whether to actually obey breakpoint instructions
5607 * (both break IL instructions and Debugger.Break () method calls), for example
5608 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5609 * untrusted or semi-trusted code.
5611 * @policy_callback will be called every time a break point instruction needs to
5612 * be inserted with the method argument being the method that calls Debugger.Break()
5613 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5614 * if it wants the breakpoint to not be effective in the given method.
5615 * #MONO_BREAK_POLICY_ALWAYS is the default.
5618 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5620 if (policy_callback)
5621 break_policy_func = policy_callback;
/* NULL callback restores the default always-break policy (the `else`
 * keyword is on an elided line — confirm against full source). */
5623 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical misspelling, kept for ABI/callers)
 * Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted. Return statements for the cases are
 * on elided lines in this chunk.
 */
5627 should_insert_brekpoint (MonoMethod *method) {
5628 switch (break_policy_func (method)) {
5629 case MONO_BREAK_POLICY_ALWAYS:
5631 case MONO_BREAK_POLICY_NEVER:
5633 case MONO_BREAK_POLICY_ON_DBG:
5634 g_warning ("mdb no longer supported");
/* Embedder callback returned something outside MonoBreakPolicy. */
5637 g_warning ("Incorrect value returned from break policy callback");
5642 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the GetGenericValueImpl (is_set == FALSE) / SetGenericValueImpl
 * (is_set == TRUE) icalls: args [0] = array, args [1] = index,
 * args [2] = pointer to the value (fsig->params [2] is its byref type).
 */
5644 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5646 MonoInst *addr, *store, *load;
5647 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5649 /* the bounds check is already done by the callers */
5650 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the element slot; reference elements need a write barrier. */
5652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5653 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5654 if (mini_type_is_reference (fsig->params [2]))
5655 emit_write_barrier (cfg, addr, load);
/* Get: copy the element into the memory args [2] points at. */
5657 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is treated as a reference type (incl. generic sharing cases). */
5665 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5667 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp [2] into sp [0] [sp [1]] (stelem).
 * With safety checks and a reference element type (and a value not known to
 * be null), go through the virtual stelemref helper which performs the array
 * covariance check; otherwise store directly, with a constant-index fast path.
 */
5671 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5673 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5674 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5675 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5676 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5677 MonoInst *iargs [3];
5680 mono_class_setup_vtable (obj_array);
5681 g_assert (helper->slot);
5683 if (sp [0]->type != STACK_OBJ)
5685 if (sp [2]->type != STACK_OBJ)
/* Virtual call so the helper dispatches on the actual array type. */
5692 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt elements: address computed at runtime, store as a value type. */
5696 if (mini_is_gsharedvt_variable_klass (klass)) {
5699 // FIXME-VT: OP_ICONST optimization
5700 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5701 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5702 ins->opcode = OP_STOREV_MEMBASE;
5703 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store itself. */
5704 int array_reg = sp [0]->dreg;
5705 int index_reg = sp [1]->dreg;
5706 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5708 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5709 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5712 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store through it. */
5715 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5717 if (generic_class_is_reference_type (cfg, klass))
5718 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline Array.UnsafeStore (is_set) / Array.UnsafeLoad: element access with
 * no safety checks. Element class comes from the value parameter for stores
 * and from the return type for loads.
 */
5725 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5730 eklass = mono_class_from_mono_type (fsig->params [2]);
5732 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety_checks == FALSE. */
5735 return emit_array_store (cfg, eklass, args, FALSE);
5737 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether the Array.UnsafeMov<S,R> intrinsic may reinterpret a value
 * of PARAM_KLASS as RETURN_KLASS without a real conversion. Accepts pairs
 * that are both value types (no GC references), not floats, and either the
 * same size or both small enough to share an integer register class.
 *
 * Fix: restored `&param_klass` where mojibake had corrupted it to
 * `¶m_klass` (the `&para;` HTML entity decoded into `¶`), matching the
 * parallel `&return_klass->byval_arg` usage below.
 */
5744 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5747 int param_size, return_size;
/* Normalize enums etc. to their underlying types before comparing. */
5749 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5750 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5752 if (cfg->verbose_level > 3)
5753 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5755 //Don't allow mixing reference types with value types
5756 if (param_klass->valuetype != return_klass->valuetype) {
5757 if (cfg->verbose_level > 3)
5758 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5762 if (!param_klass->valuetype) {
5763 if (cfg->verbose_level > 3)
5764 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* GC-tracked fields cannot be reinterpreted safely. */
5769 if (param_klass->has_references || return_klass->has_references)
5772 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5773 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5774 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5775 if (cfg->verbose_level > 3)
5776 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5780 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5781 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5782 if (cfg->verbose_level > 3)
5783 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5787 param_size = mono_class_value_size (param_klass, &align);
5788 return_size = mono_class_value_size (return_klass, &align);
5790 //We can do it if sizes match
5791 if (param_size == return_size) {
5792 if (cfg->verbose_level > 3)
5793 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5797 //No simple way to handle struct if sizes don't match
5798 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5799 if (cfg->verbose_level > 3)
5800 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5805 * Same reg size category.
5806 * A quick note on why we don't require widening here.
5807 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5809 * Since the source value comes from a function argument, the JIT will already have
5810 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5812 if (param_size <= 4 && return_size <= 4) {
5813 if (cfg->verbose_level > 3)
5814 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Inline Array.UnsafeMov<S,R>: reinterpret args [0] as the return type when
 * is_unsafe_mov_compatible () approves the pair, either directly or through
 * matching rank-1 array element classes. Bails out for gsharedvt returns.
 */
5822 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5824 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5825 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5827 if (mini_is_gsharedvt_variable_type (fsig->ret))
5830 //Valuetypes that are semantically equivalent or numbers than can be widened to
5831 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5834 //Arrays of valuetypes that are semantically equivalent
5835 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with intrinsic IR: SIMD intrinsics first
 * (when enabled), then native-type intrinsics.
 */
5842 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5844 #ifdef MONO_ARCH_SIMD_INTRINSICS
5845 MonoInst *ins = NULL;
5847 if (cfg->opt & MONO_OPT_SIMD) {
5848 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5854 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given MONO_MEMORY_BARRIER_* kind. */
5858 emit_memory_barrier (MonoCompile *cfg, int kind)
5860 MonoInst *ins = NULL;
5861 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5862 MONO_ADD_INS (cfg->cbb, ins);
5863 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * LLVM-backend-only intrinsics: System.Math Sin/Cos/Sqrt/Abs(double) map to
 * single opcodes, and Min/Max map to CMOV-style IMIN/IMAX variants when
 * MONO_OPT_CMOV is enabled. Opcode-assignment lines for several branches are
 * elided in this chunk.
 */
5869 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5871 MonoInst *ins = NULL;
5874 /* The LLVM backend supports these intrinsics */
5875 if (cmethod->klass == mono_defaults.math_class) {
5876 if (strcmp (cmethod->name, "Sin") == 0) {
5878 } else if (strcmp (cmethod->name, "Cos") == 0) {
5880 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5882 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one source, R8 result. */
5886 if (opcode && fsig->param_count == 1) {
5887 MONO_INST_NEW (cfg, ins, opcode);
5888 ins->type = STACK_R8;
5889 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5890 ins->sreg1 = args [0]->dreg;
5891 MONO_ADD_INS (cfg->cbb, ins);
5895 if (cfg->opt & MONO_OPT_CMOV) {
5896 if (strcmp (cmethod->name, "Min") == 0) {
5897 if (fsig->params [0]->type == MONO_TYPE_I4)
5899 if (fsig->params [0]->type == MONO_TYPE_U4)
5900 opcode = OP_IMIN_UN;
5901 else if (fsig->params [0]->type == MONO_TYPE_I8)
5903 else if (fsig->params [0]->type == MONO_TYPE_U8)
5904 opcode = OP_LMIN_UN;
5905 } else if (strcmp (cmethod->name, "Max") == 0) {
5906 if (fsig->params [0]->type == MONO_TYPE_I4)
5908 if (fsig->params [0]->type == MONO_TYPE_U4)
5909 opcode = OP_IMAX_UN;
5910 else if (fsig->params [0]->type == MONO_TYPE_I8)
5912 else if (fsig->params [0]->type == MONO_TYPE_U8)
5913 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the 32/64-bit operand width. */
5917 if (opcode && fsig->param_count == 2) {
5918 MONO_INST_NEW (cfg, ins, opcode);
5919 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5920 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5921 ins->sreg1 = args [0]->dreg;
5922 ins->sreg2 = args [1]->dreg;
5923 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe under generic code sharing: the System.Array
 * UnsafeStore / UnsafeLoad / UnsafeMov helpers.
 */
5931 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5933 if (cmethod->klass == mono_defaults.array_class) {
5934 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5935 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5936 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5937 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5938 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5939 return emit_array_unsafe_mov (cfg, fsig, args);
5946 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5948 MonoInst *ins = NULL;
5950 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5952 if (cmethod->klass == mono_defaults.string_class) {
5953 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5954 int dreg = alloc_ireg (cfg);
5955 int index_reg = alloc_preg (cfg);
5956 int add_reg = alloc_preg (cfg);
5958 #if SIZEOF_REGISTER == 8
5959 if (COMPILE_LLVM (cfg)) {
5960 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5962 /* The array reg is 64 bits but the index reg is only 32 */
5963 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5966 index_reg = args [1]->dreg;
5968 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5970 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5971 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5972 add_reg = ins->dreg;
5973 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5976 int mult_reg = alloc_preg (cfg);
5977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5978 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5979 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5980 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5982 type_from_op (cfg, ins, NULL, NULL);
5984 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5985 int dreg = alloc_ireg (cfg);
5986 /* Decompose later to allow more optimizations */
5987 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5988 ins->type = STACK_I4;
5989 ins->flags |= MONO_INST_FAULT;
5990 cfg->cbb->has_array_access = TRUE;
5991 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5996 } else if (cmethod->klass == mono_defaults.object_class) {
5997 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5998 int dreg = alloc_ireg_ref (cfg);
5999 int vt_reg = alloc_preg (cfg);
6000 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6001 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6002 type_from_op (cfg, ins, NULL, NULL);
6005 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6006 int dreg = alloc_ireg (cfg);
6007 int t1 = alloc_ireg (cfg);
6009 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6010 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6011 ins->type = STACK_I4;
6014 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6015 MONO_INST_NEW (cfg, ins, OP_NOP);
6016 MONO_ADD_INS (cfg->cbb, ins);
6020 } else if (cmethod->klass == mono_defaults.array_class) {
6021 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6022 return emit_array_generic_access (cfg, fsig, args, FALSE);
6023 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6024 return emit_array_generic_access (cfg, fsig, args, TRUE);
6026 #ifndef MONO_BIG_ARRAYS
6028 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6031 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6032 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6033 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6034 int dreg = alloc_ireg (cfg);
6035 int bounds_reg = alloc_ireg_mp (cfg);
6036 MonoBasicBlock *end_bb, *szarray_bb;
6037 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6039 NEW_BBLOCK (cfg, end_bb);
6040 NEW_BBLOCK (cfg, szarray_bb);
6042 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6043 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6046 /* Non-szarray case */
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6052 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6053 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6054 MONO_START_BB (cfg, szarray_bb);
6057 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6058 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6060 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6061 MONO_START_BB (cfg, end_bb);
6063 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6064 ins->type = STACK_I4;
6070 if (cmethod->name [0] != 'g')
6073 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6074 int dreg = alloc_ireg (cfg);
6075 int vtable_reg = alloc_preg (cfg);
6076 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6077 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6079 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6080 type_from_op (cfg, ins, NULL, NULL);
6083 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6084 int dreg = alloc_ireg (cfg);
6086 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6087 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6088 type_from_op (cfg, ins, NULL, NULL);
6093 } else if (cmethod->klass == runtime_helpers_class) {
6094 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6095 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6099 } else if (cmethod->klass == mono_defaults.monitor_class) {
6100 gboolean is_enter = FALSE;
6102 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6107 * To make async stack traces work, icalls which can block should have a wrapper.
6108 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6110 MonoBasicBlock *end_bb;
6112 NEW_BBLOCK (cfg, end_bb);
6114 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter_fast, args);
6115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6117 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter, args);
6118 MONO_START_BB (cfg, end_bb);
6121 } else if (cmethod->klass == mono_defaults.thread_class) {
6122 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6123 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6124 MONO_ADD_INS (cfg->cbb, ins);
6126 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6127 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6128 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6130 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6132 if (fsig->params [0]->type == MONO_TYPE_I1)
6133 opcode = OP_LOADI1_MEMBASE;
6134 else if (fsig->params [0]->type == MONO_TYPE_U1)
6135 opcode = OP_LOADU1_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_I2)
6137 opcode = OP_LOADI2_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_U2)
6139 opcode = OP_LOADU2_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_I4)
6141 opcode = OP_LOADI4_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_U4)
6143 opcode = OP_LOADU4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6145 opcode = OP_LOADI8_MEMBASE;
6146 else if (fsig->params [0]->type == MONO_TYPE_R4)
6147 opcode = OP_LOADR4_MEMBASE;
6148 else if (fsig->params [0]->type == MONO_TYPE_R8)
6149 opcode = OP_LOADR8_MEMBASE;
6150 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6151 opcode = OP_LOAD_MEMBASE;
6154 MONO_INST_NEW (cfg, ins, opcode);
6155 ins->inst_basereg = args [0]->dreg;
6156 ins->inst_offset = 0;
6157 MONO_ADD_INS (cfg->cbb, ins);
6159 switch (fsig->params [0]->type) {
6166 ins->dreg = mono_alloc_ireg (cfg);
6167 ins->type = STACK_I4;
6171 ins->dreg = mono_alloc_lreg (cfg);
6172 ins->type = STACK_I8;
6176 ins->dreg = mono_alloc_ireg (cfg);
6177 #if SIZEOF_REGISTER == 8
6178 ins->type = STACK_I8;
6180 ins->type = STACK_I4;
6185 ins->dreg = mono_alloc_freg (cfg);
6186 ins->type = STACK_R8;
6189 g_assert (mini_type_is_reference (fsig->params [0]));
6190 ins->dreg = mono_alloc_ireg_ref (cfg);
6191 ins->type = STACK_OBJ;
6195 if (opcode == OP_LOADI8_MEMBASE)
6196 ins = mono_decompose_opcode (cfg, ins);
6198 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6202 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6204 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6206 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6207 opcode = OP_STOREI1_MEMBASE_REG;
6208 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6209 opcode = OP_STOREI2_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6211 opcode = OP_STOREI4_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6213 opcode = OP_STOREI8_MEMBASE_REG;
6214 else if (fsig->params [0]->type == MONO_TYPE_R4)
6215 opcode = OP_STORER4_MEMBASE_REG;
6216 else if (fsig->params [0]->type == MONO_TYPE_R8)
6217 opcode = OP_STORER8_MEMBASE_REG;
6218 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6219 opcode = OP_STORE_MEMBASE_REG;
6222 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6224 MONO_INST_NEW (cfg, ins, opcode);
6225 ins->sreg1 = args [1]->dreg;
6226 ins->inst_destbasereg = args [0]->dreg;
6227 ins->inst_offset = 0;
6228 MONO_ADD_INS (cfg->cbb, ins);
6230 if (opcode == OP_STOREI8_MEMBASE_REG)
6231 ins = mono_decompose_opcode (cfg, ins);
6236 } else if (cmethod->klass->image == mono_defaults.corlib &&
6237 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6238 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6241 #if SIZEOF_REGISTER == 8
6242 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6243 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6244 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6245 ins->dreg = mono_alloc_preg (cfg);
6246 ins->sreg1 = args [0]->dreg;
6247 ins->type = STACK_I8;
6248 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6249 MONO_ADD_INS (cfg->cbb, ins);
6253 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6255 /* 64 bit reads are already atomic */
6256 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6257 load_ins->dreg = mono_alloc_preg (cfg);
6258 load_ins->inst_basereg = args [0]->dreg;
6259 load_ins->inst_offset = 0;
6260 load_ins->type = STACK_I8;
6261 MONO_ADD_INS (cfg->cbb, load_ins);
6263 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6270 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6271 MonoInst *ins_iconst;
6274 if (fsig->params [0]->type == MONO_TYPE_I4) {
6275 opcode = OP_ATOMIC_ADD_I4;
6276 cfg->has_atomic_add_i4 = TRUE;
6278 #if SIZEOF_REGISTER == 8
6279 else if (fsig->params [0]->type == MONO_TYPE_I8)
6280 opcode = OP_ATOMIC_ADD_I8;
6283 if (!mono_arch_opcode_supported (opcode))
6285 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6286 ins_iconst->inst_c0 = 1;
6287 ins_iconst->dreg = mono_alloc_ireg (cfg);
6288 MONO_ADD_INS (cfg->cbb, ins_iconst);
6290 MONO_INST_NEW (cfg, ins, opcode);
6291 ins->dreg = mono_alloc_ireg (cfg);
6292 ins->inst_basereg = args [0]->dreg;
6293 ins->inst_offset = 0;
6294 ins->sreg2 = ins_iconst->dreg;
6295 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6296 MONO_ADD_INS (cfg->cbb, ins);
6298 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6299 MonoInst *ins_iconst;
6302 if (fsig->params [0]->type == MONO_TYPE_I4) {
6303 opcode = OP_ATOMIC_ADD_I4;
6304 cfg->has_atomic_add_i4 = TRUE;
6306 #if SIZEOF_REGISTER == 8
6307 else if (fsig->params [0]->type == MONO_TYPE_I8)
6308 opcode = OP_ATOMIC_ADD_I8;
6311 if (!mono_arch_opcode_supported (opcode))
6313 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6314 ins_iconst->inst_c0 = -1;
6315 ins_iconst->dreg = mono_alloc_ireg (cfg);
6316 MONO_ADD_INS (cfg->cbb, ins_iconst);
6318 MONO_INST_NEW (cfg, ins, opcode);
6319 ins->dreg = mono_alloc_ireg (cfg);
6320 ins->inst_basereg = args [0]->dreg;
6321 ins->inst_offset = 0;
6322 ins->sreg2 = ins_iconst->dreg;
6323 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6324 MONO_ADD_INS (cfg->cbb, ins);
6326 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6329 if (fsig->params [0]->type == MONO_TYPE_I4) {
6330 opcode = OP_ATOMIC_ADD_I4;
6331 cfg->has_atomic_add_i4 = TRUE;
6333 #if SIZEOF_REGISTER == 8
6334 else if (fsig->params [0]->type == MONO_TYPE_I8)
6335 opcode = OP_ATOMIC_ADD_I8;
6338 if (!mono_arch_opcode_supported (opcode))
6340 MONO_INST_NEW (cfg, ins, opcode);
6341 ins->dreg = mono_alloc_ireg (cfg);
6342 ins->inst_basereg = args [0]->dreg;
6343 ins->inst_offset = 0;
6344 ins->sreg2 = args [1]->dreg;
6345 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6346 MONO_ADD_INS (cfg->cbb, ins);
6349 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6350 MonoInst *f2i = NULL, *i2f;
6351 guint32 opcode, f2i_opcode, i2f_opcode;
6352 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6353 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6355 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6356 fsig->params [0]->type == MONO_TYPE_R4) {
6357 opcode = OP_ATOMIC_EXCHANGE_I4;
6358 f2i_opcode = OP_MOVE_F_TO_I4;
6359 i2f_opcode = OP_MOVE_I4_TO_F;
6360 cfg->has_atomic_exchange_i4 = TRUE;
6362 #if SIZEOF_REGISTER == 8
6364 fsig->params [0]->type == MONO_TYPE_I8 ||
6365 fsig->params [0]->type == MONO_TYPE_R8 ||
6366 fsig->params [0]->type == MONO_TYPE_I) {
6367 opcode = OP_ATOMIC_EXCHANGE_I8;
6368 f2i_opcode = OP_MOVE_F_TO_I8;
6369 i2f_opcode = OP_MOVE_I8_TO_F;
6372 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6373 opcode = OP_ATOMIC_EXCHANGE_I4;
6374 cfg->has_atomic_exchange_i4 = TRUE;
6380 if (!mono_arch_opcode_supported (opcode))
6384 /* TODO: Decompose these opcodes instead of bailing here. */
6385 if (COMPILE_SOFT_FLOAT (cfg))
6388 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6389 f2i->dreg = mono_alloc_ireg (cfg);
6390 f2i->sreg1 = args [1]->dreg;
6391 if (f2i_opcode == OP_MOVE_F_TO_I4)
6392 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6393 MONO_ADD_INS (cfg->cbb, f2i);
6396 MONO_INST_NEW (cfg, ins, opcode);
6397 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6398 ins->inst_basereg = args [0]->dreg;
6399 ins->inst_offset = 0;
6400 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6401 MONO_ADD_INS (cfg->cbb, ins);
6403 switch (fsig->params [0]->type) {
6405 ins->type = STACK_I4;
6408 ins->type = STACK_I8;
6411 #if SIZEOF_REGISTER == 8
6412 ins->type = STACK_I8;
6414 ins->type = STACK_I4;
6419 ins->type = STACK_R8;
6422 g_assert (mini_type_is_reference (fsig->params [0]));
6423 ins->type = STACK_OBJ;
6428 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6429 i2f->dreg = mono_alloc_freg (cfg);
6430 i2f->sreg1 = ins->dreg;
6431 i2f->type = STACK_R8;
6432 if (i2f_opcode == OP_MOVE_I4_TO_F)
6433 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6434 MONO_ADD_INS (cfg->cbb, i2f);
6439 if (cfg->gen_write_barriers && is_ref)
6440 emit_write_barrier (cfg, args [0], args [1]);
6442 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6443 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6444 guint32 opcode, f2i_opcode, i2f_opcode;
6445 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6446 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6448 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6449 fsig->params [1]->type == MONO_TYPE_R4) {
6450 opcode = OP_ATOMIC_CAS_I4;
6451 f2i_opcode = OP_MOVE_F_TO_I4;
6452 i2f_opcode = OP_MOVE_I4_TO_F;
6453 cfg->has_atomic_cas_i4 = TRUE;
6455 #if SIZEOF_REGISTER == 8
6457 fsig->params [1]->type == MONO_TYPE_I8 ||
6458 fsig->params [1]->type == MONO_TYPE_R8 ||
6459 fsig->params [1]->type == MONO_TYPE_I) {
6460 opcode = OP_ATOMIC_CAS_I8;
6461 f2i_opcode = OP_MOVE_F_TO_I8;
6462 i2f_opcode = OP_MOVE_I8_TO_F;
6465 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6466 opcode = OP_ATOMIC_CAS_I4;
6467 cfg->has_atomic_cas_i4 = TRUE;
6473 if (!mono_arch_opcode_supported (opcode))
6477 /* TODO: Decompose these opcodes instead of bailing here. */
6478 if (COMPILE_SOFT_FLOAT (cfg))
6481 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6482 f2i_new->dreg = mono_alloc_ireg (cfg);
6483 f2i_new->sreg1 = args [1]->dreg;
6484 if (f2i_opcode == OP_MOVE_F_TO_I4)
6485 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6486 MONO_ADD_INS (cfg->cbb, f2i_new);
6488 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6489 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6490 f2i_cmp->sreg1 = args [2]->dreg;
6491 if (f2i_opcode == OP_MOVE_F_TO_I4)
6492 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6493 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6496 MONO_INST_NEW (cfg, ins, opcode);
6497 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6498 ins->sreg1 = args [0]->dreg;
6499 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6500 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6501 MONO_ADD_INS (cfg->cbb, ins);
6503 switch (fsig->params [1]->type) {
6505 ins->type = STACK_I4;
6508 ins->type = STACK_I8;
6511 #if SIZEOF_REGISTER == 8
6512 ins->type = STACK_I8;
6514 ins->type = STACK_I4;
6518 ins->type = cfg->r4_stack_type;
6521 ins->type = STACK_R8;
6524 g_assert (mini_type_is_reference (fsig->params [1]));
6525 ins->type = STACK_OBJ;
6530 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6531 i2f->dreg = mono_alloc_freg (cfg);
6532 i2f->sreg1 = ins->dreg;
6533 i2f->type = STACK_R8;
6534 if (i2f_opcode == OP_MOVE_I4_TO_F)
6535 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6536 MONO_ADD_INS (cfg->cbb, i2f);
6541 if (cfg->gen_write_barriers && is_ref)
6542 emit_write_barrier (cfg, args [0], args [1]);
6544 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6545 fsig->params [1]->type == MONO_TYPE_I4) {
6546 MonoInst *cmp, *ceq;
6548 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6551 /* int32 r = CAS (location, value, comparand); */
6552 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6553 ins->dreg = alloc_ireg (cfg);
6554 ins->sreg1 = args [0]->dreg;
6555 ins->sreg2 = args [1]->dreg;
6556 ins->sreg3 = args [2]->dreg;
6557 ins->type = STACK_I4;
6558 MONO_ADD_INS (cfg->cbb, ins);
6560 /* bool result = r == comparand; */
6561 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6562 cmp->sreg1 = ins->dreg;
6563 cmp->sreg2 = args [2]->dreg;
6564 cmp->type = STACK_I4;
6565 MONO_ADD_INS (cfg->cbb, cmp);
6567 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6568 ceq->dreg = alloc_ireg (cfg);
6569 ceq->type = STACK_I4;
6570 MONO_ADD_INS (cfg->cbb, ceq);
6572 /* *success = result; */
6573 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6575 cfg->has_atomic_cas_i4 = TRUE;
6577 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6578 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6582 } else if (cmethod->klass->image == mono_defaults.corlib &&
6583 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6584 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6587 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6589 MonoType *t = fsig->params [0];
6591 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6593 g_assert (t->byref);
6594 /* t is a byref type, so the reference check is more complicated */
6595 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6596 if (t->type == MONO_TYPE_I1)
6597 opcode = OP_ATOMIC_LOAD_I1;
6598 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6599 opcode = OP_ATOMIC_LOAD_U1;
6600 else if (t->type == MONO_TYPE_I2)
6601 opcode = OP_ATOMIC_LOAD_I2;
6602 else if (t->type == MONO_TYPE_U2)
6603 opcode = OP_ATOMIC_LOAD_U2;
6604 else if (t->type == MONO_TYPE_I4)
6605 opcode = OP_ATOMIC_LOAD_I4;
6606 else if (t->type == MONO_TYPE_U4)
6607 opcode = OP_ATOMIC_LOAD_U4;
6608 else if (t->type == MONO_TYPE_R4)
6609 opcode = OP_ATOMIC_LOAD_R4;
6610 else if (t->type == MONO_TYPE_R8)
6611 opcode = OP_ATOMIC_LOAD_R8;
6612 #if SIZEOF_REGISTER == 8
6613 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6614 opcode = OP_ATOMIC_LOAD_I8;
6615 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6616 opcode = OP_ATOMIC_LOAD_U8;
6618 else if (t->type == MONO_TYPE_I)
6619 opcode = OP_ATOMIC_LOAD_I4;
6620 else if (is_ref || t->type == MONO_TYPE_U)
6621 opcode = OP_ATOMIC_LOAD_U4;
6625 if (!mono_arch_opcode_supported (opcode))
6628 MONO_INST_NEW (cfg, ins, opcode);
6629 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6630 ins->sreg1 = args [0]->dreg;
6631 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6632 MONO_ADD_INS (cfg->cbb, ins);
6635 case MONO_TYPE_BOOLEAN:
6642 ins->type = STACK_I4;
6646 ins->type = STACK_I8;
6650 #if SIZEOF_REGISTER == 8
6651 ins->type = STACK_I8;
6653 ins->type = STACK_I4;
6657 ins->type = cfg->r4_stack_type;
6660 ins->type = STACK_R8;
6664 ins->type = STACK_OBJ;
6670 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6672 MonoType *t = fsig->params [0];
6675 g_assert (t->byref);
6676 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6677 if (t->type == MONO_TYPE_I1)
6678 opcode = OP_ATOMIC_STORE_I1;
6679 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6680 opcode = OP_ATOMIC_STORE_U1;
6681 else if (t->type == MONO_TYPE_I2)
6682 opcode = OP_ATOMIC_STORE_I2;
6683 else if (t->type == MONO_TYPE_U2)
6684 opcode = OP_ATOMIC_STORE_U2;
6685 else if (t->type == MONO_TYPE_I4)
6686 opcode = OP_ATOMIC_STORE_I4;
6687 else if (t->type == MONO_TYPE_U4)
6688 opcode = OP_ATOMIC_STORE_U4;
6689 else if (t->type == MONO_TYPE_R4)
6690 opcode = OP_ATOMIC_STORE_R4;
6691 else if (t->type == MONO_TYPE_R8)
6692 opcode = OP_ATOMIC_STORE_R8;
6693 #if SIZEOF_REGISTER == 8
6694 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6695 opcode = OP_ATOMIC_STORE_I8;
6696 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6697 opcode = OP_ATOMIC_STORE_U8;
6699 else if (t->type == MONO_TYPE_I)
6700 opcode = OP_ATOMIC_STORE_I4;
6701 else if (is_ref || t->type == MONO_TYPE_U)
6702 opcode = OP_ATOMIC_STORE_U4;
6706 if (!mono_arch_opcode_supported (opcode))
6709 MONO_INST_NEW (cfg, ins, opcode);
6710 ins->dreg = args [0]->dreg;
6711 ins->sreg1 = args [1]->dreg;
6712 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6713 MONO_ADD_INS (cfg->cbb, ins);
6715 if (cfg->gen_write_barriers && is_ref)
6716 emit_write_barrier (cfg, args [0], args [1]);
6722 } else if (cmethod->klass->image == mono_defaults.corlib &&
6723 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6724 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6725 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6726 if (should_insert_brekpoint (cfg->method)) {
6727 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6729 MONO_INST_NEW (cfg, ins, OP_NOP);
6730 MONO_ADD_INS (cfg->cbb, ins);
6734 } else if (cmethod->klass->image == mono_defaults.corlib &&
6735 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6736 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6737 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6739 EMIT_NEW_ICONST (cfg, ins, 1);
6741 EMIT_NEW_ICONST (cfg, ins, 0);
6744 } else if (cmethod->klass->image == mono_defaults.corlib &&
6745 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6746 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6747 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6748 /* No stack walks are currently available, so implement this as an intrinsic */
6749 MonoInst *assembly_ins;
6751 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6752 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6755 } else if (cmethod->klass->image == mono_defaults.corlib &&
6756 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6757 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6758 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6759 /* No stack walks are currently available, so implement this as an intrinsic */
6760 MonoInst *method_ins;
6761 MonoMethod *declaring = cfg->method;
6763 /* This returns the declaring generic method */
6764 if (declaring->is_inflated)
6765 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6766 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6767 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6768 cfg->no_inline = TRUE;
6769 if (cfg->method != cfg->current_method)
6770 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6773 } else if (cmethod->klass == mono_defaults.math_class) {
6775 * There is general branchless code for Min/Max, but it does not work for
6777 * http://everything2.com/?node_id=1051618
6779 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6780 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6781 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6782 !strcmp (cmethod->klass->name, "Selector")) ||
6783 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6784 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6785 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6786 !strcmp (cmethod->klass->name, "Selector"))
6788 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6789 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6790 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6793 MonoJumpInfoToken *ji;
6796 if (args [0]->opcode == OP_GOT_ENTRY) {
6797 pi = (MonoInst *)args [0]->inst_p1;
6798 g_assert (pi->opcode == OP_PATCH_INFO);
6799 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6800 ji = (MonoJumpInfoToken *)pi->inst_p0;
6802 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6803 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6806 NULLIFY_INS (args [0]);
6808 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6809 return_val_if_nok (&cfg->error, NULL);
6811 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6812 ins->dreg = mono_alloc_ireg (cfg);
6815 MONO_ADD_INS (cfg->cbb, ins);
6820 #ifdef MONO_ARCH_SIMD_INTRINSICS
6821 if (cfg->opt & MONO_OPT_SIMD) {
6822 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6828 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6832 if (COMPILE_LLVM (cfg)) {
6833 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6838 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6842 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-internal replacement when one is
 * available.  The only case visible here is String.InternalAllocateStr,
 * which is redirected to the managed GC string allocator when allocation
 * profiling is off and the method is not compiled in shared (domain-neutral)
 * mode.  Returns the emitted replacement call instruction, or falls through
 * when no redirection applies.
 */
6845 inline static MonoInst*
6846 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6847 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6849 if (method->klass == mono_defaults.string_class) {
6850 /* managed string allocation support */
6851 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6852 MonoInst *iargs [2];
6853 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6854 MonoMethod *managed_alloc = NULL;
6856 g_assert (vtable); /* Should not fail since it is System.String */
6857 #ifndef MONO_CROSS_COMPILE
6858 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, requested length) */
6862 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6863 iargs [1] = args [0];
6864 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments on the evaluation stack (SP) into newly created
 * local variables, filling cfg->args so an inlined method body can access its
 * arguments through the usual argument loads.  For instance calls, the first
 * entry ('this') takes its type from the stack slot rather than the signature.
 */
6871 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6873 MonoInst *store, *temp;
6876 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6877 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6880 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6881 * would be different than the MonoInst's used to represent arguments, and
6882 * the ldelema implementation can't deal with that.
6883 * Solution: When ldelema is used on an inline argument, create a var for
6884 * it, emit ldelema on that var, and emit the saving code below in
6885 * inline_method () if needed.
6887 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6888 cfg->args [i] = temp;
6889 /* This uses cfg->args [i] which is set by the preceding line */
6890 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6891 store->cil_code = sp [0]->cil_code;
6896 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6897 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6899 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of a called method whose full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The value is cached in a function-local static on
 * first use.  Returns TRUE when inlining CALLED_METHOD is permitted.
 */
6901 check_inline_called_method_name_limit (MonoMethod *called_method)
6904 static const char *limit = NULL;
6906 if (limit == NULL) {
6907 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6909 if (limit_string != NULL)
6910 limit = limit_string;
/* An empty limit means no filtering; only compare when a prefix was given. */
6915 if (limit [0] != '\0') {
6916 char *called_method_name = mono_method_full_name (called_method, TRUE);
6918 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6919 g_free (called_method_name);
6921 //return (strncmp_result <= 0);
6922 return (strncmp_result == 0);
6929 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining into a caller whose full name starts
 * with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ().
 * Returns TRUE when inlining into CALLER_METHOD is permitted.
 */
6931 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6934 static const char *limit = NULL;
6936 if (limit == NULL) {
6937 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6938 if (limit_string != NULL) {
6939 limit = limit_string;
/* An empty limit means no filtering; only compare when a prefix was given. */
6945 if (limit [0] != '\0') {
6946 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6948 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6949 g_free (caller_method_name);
6951 //return (strncmp_result <= 0);
6952 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes the register DREG to the zero value of type
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats
 * (loaded from static r4_0/r8_0 constants), and VZERO for value types.
 * Used e.g. to give an inline call's return variable a dummy value when the
 * inlined body never sets it.
 */
6960 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6962 static double r8_0 = 0.0;
6963 static float r4_0 = 0.0;
6967 rtype = mini_get_underlying_type (rtype);
6971 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6972 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6973 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6974 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6975 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6976 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision registers */
6977 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6978 ins->type = STACK_R4;
6979 ins->inst_p0 = (void*)&r4_0;
6981 MONO_ADD_INS (cfg->cbb, ins);
6982 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6983 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6984 ins->type = STACK_R8;
6985 ins->inst_p0 = (void*)&r8_0;
6987 MONO_ADD_INS (cfg->cbb, ins);
6988 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6989 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6990 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6991 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Gsharedvt type variables known to be value types are zeroed like vtypes */
6992 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized reference */
6994 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initialization opcodes for DREG so the IR stays valid
 * (every register has a def) without emitting real initialization code.
 * Falls back to emit_init_rvar () for types with no dummy opcode.
 * The type dispatch mirrors emit_init_rvar () exactly.
 */
6999 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7003 rtype = mini_get_underlying_type (rtype);
7007 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7008 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7009 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7010 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7011 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7012 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7014 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7016 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7017 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7018 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7019 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7020 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization instead */
7022 emit_init_rvar (cfg, dreg, rtype);
7026 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  When INIT is TRUE a real zero
 * initialization is emitted; otherwise only dummy init statements are
 * emitted to keep the IR valid.  Under soft-float the value is materialized
 * in a fresh register first and then stored to the local.
 */
7028 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7030 MonoInst *var = cfg->locals [local];
7031 if (COMPILE_SOFT_FLOAT (cfg)) {
7033 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7034 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cfg->cbb->last_ins) into the local */
7035 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7038 emit_init_rvar (cfg, var->dreg, type);
7040 emit_dummy_init_rvar (cfg, var->dreg, type);
7047 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of compilation (cfg->cbb),
 * with the arguments in SP.  Saves all of the per-method compilation state
 * of CFG into prev_* locals, recursively invokes mono_method_to_ir () on the
 * callee body between a fresh start/end bblock pair, then restores the state
 * and either links the inlined blocks in (on success) or discards them (on
 * abort).  Returns the cost of inlining CMETHOD (<= 0 on failure).
 */
7050 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7051 guchar *ip, guint real_offset, gboolean inline_always)
7054 MonoInst *ins, *rvar = NULL;
7055 MonoMethodHeader *cheader;
7056 MonoBasicBlock *ebblock, *sbblock;
7058 MonoMethod *prev_inlined_method;
7059 MonoInst **prev_locals, **prev_args;
7060 MonoType **prev_arg_types;
7061 guint prev_real_offset;
7062 GHashTable *prev_cbb_hash;
7063 MonoBasicBlock **prev_cil_offset_to_bb;
7064 MonoBasicBlock *prev_cbb;
7065 unsigned char* prev_cil_start;
7066 guint32 prev_cil_offset_to_bb_len;
7067 MonoMethod *prev_current_method;
7068 MonoGenericContext *prev_generic_context;
7069 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7071 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filters restricting which methods get inlined (debugging aid) */
7073 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7074 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7077 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7078 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7083 fsig = mono_method_signature (cmethod);
7085 if (cfg->verbose_level > 2)
7086 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7088 if (!cmethod->inline_info) {
7089 cfg->stat_inlineable_methods++;
7090 cmethod->inline_info = 1;
7093 /* allocate local variables */
7094 cheader = mono_method_get_header_checked (cmethod, &error);
7096 if (inline_always) {
/* For mandatory inlines a header failure is a hard compilation error */
7097 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7098 mono_error_move (&cfg->error, &error);
7100 mono_error_cleanup (&error);
7105 /*Must verify before creating locals as it can cause the JIT to assert.*/
7106 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7107 mono_metadata_free_mh (cheader);
7111 /* allocate space to store the return value */
7112 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7113 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7116 prev_locals = cfg->locals;
7117 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7118 for (i = 0; i < cheader->num_locals; ++i)
7119 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7121 /* allocate start and end blocks */
7122 /* This is needed so if the inline is aborted, we can clean up */
7123 NEW_BBLOCK (cfg, sbblock);
7124 sbblock->real_offset = real_offset;
7126 NEW_BBLOCK (cfg, ebblock);
7127 ebblock->block_num = cfg->num_bblocks++;
7128 ebblock->real_offset = real_offset;
/* Save the caller's compilation state so it can be restored after the recursive call */
7130 prev_args = cfg->args;
7131 prev_arg_types = cfg->arg_types;
7132 prev_inlined_method = cfg->inlined_method;
7133 cfg->inlined_method = cmethod;
7134 cfg->ret_var_set = FALSE;
7135 cfg->inline_depth ++;
7136 prev_real_offset = cfg->real_offset;
7137 prev_cbb_hash = cfg->cbb_hash;
7138 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7139 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7140 prev_cil_start = cfg->cil_start;
7141 prev_cbb = cfg->cbb;
7142 prev_current_method = cfg->current_method;
7143 prev_generic_context = cfg->generic_context;
7144 prev_ret_var_set = cfg->ret_var_set;
7145 prev_disable_inline = cfg->disable_inline;
7147 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee body to IR between sbblock and ebblock */
7150 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7152 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state */
7154 cfg->inlined_method = prev_inlined_method;
7155 cfg->real_offset = prev_real_offset;
7156 cfg->cbb_hash = prev_cbb_hash;
7157 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7158 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7159 cfg->cil_start = prev_cil_start;
7160 cfg->locals = prev_locals;
7161 cfg->args = prev_args;
7162 cfg->arg_types = prev_arg_types;
7163 cfg->current_method = prev_current_method;
7164 cfg->generic_context = prev_generic_context;
7165 cfg->ret_var_set = prev_ret_var_set;
7166 cfg->disable_inline = prev_disable_inline;
7167 cfg->inline_depth --;
/* Accept the inline when the cost is low enough, forced, or marked AggressiveInlining */
7169 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7170 if (cfg->verbose_level > 2)
7171 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7173 cfg->stat_inlined_methods++;
7175 /* always add some code to avoid block split failures */
7176 MONO_INST_NEW (cfg, ins, OP_NOP);
7177 MONO_ADD_INS (prev_cbb, ins);
7179 prev_cbb->next_bb = sbblock;
7180 link_bblock (cfg, prev_cbb, sbblock);
7183 * Get rid of the begin and end bblocks if possible to aid local
7186 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7188 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7189 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7191 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7192 MonoBasicBlock *prev = ebblock->in_bb [0];
7194 if (prev->next_bb == ebblock) {
7195 mono_merge_basic_blocks (cfg, prev, ebblock);
7197 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7198 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7199 cfg->cbb = prev_cbb;
7202 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7207 * It's possible that the rvar is set in some prev bblock, but not in others.
7213 for (i = 0; i < ebblock->in_count; ++i) {
7214 bb = ebblock->in_bb [i];
7216 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7219 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7229 * If the inlined method contains only a throw, then the ret var is not
7230 * set, so set it to a dummy value.
7233 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7235 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7238 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear the exception and discard the newly created bblocks */
7241 if (cfg->verbose_level > 2)
7242 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7243 cfg->exception_type = MONO_EXCEPTION_NONE;
7245 /* This gets rid of the newly added bblocks */
7246 cfg->cbb = prev_cbb;
7248 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7253 * Some of these comments may well be out-of-date.
7254 * Design decisions: we do a single pass over the IL code (and we do bblock
7255 * splitting/merging in the few cases when it's required: a back jump to an IL
7256 * address that was not already seen as bblock starting point).
7257 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7258 * Complex operations are decomposed in simpler ones right away. We need to let the
7259 * arch-specific code peek and poke inside this process somehow (except when the
7260 * optimizations can take advantage of the full semantic info of coarse opcodes).
7261 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7262 * MonoInst->opcode initially is the IL opcode or some simplification of that
7263 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7264 * opcode with value bigger than OP_LAST.
7265 * At this point the IR can be handed over to an interpreter, a dumb code generator
7266 * or to the optimizing code generator that will translate it to SSA form.
7268 * Profiling directed optimizations.
7269 * We may compile by default with few or no optimizations and instrument the code
7270 * or the user may indicate what methods to optimize the most either in a config file
7271 * or through repeated runs where the compiler applies offline the optimizations to
7272 * each method and then decides if it was worth it.
7275 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7276 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7277 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7278 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7279 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7280 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7281 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7282 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7284 /* offset from br.s -> br like opcodes */
7285 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. no
 * other basic block starts at that offset (the cil_offset_to_bb entry is
 * either NULL or BB itself).
 */
7288 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7290 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7292 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode, and for
 * every branch/switch create basic blocks (via GET_BBLOCK) at the branch
 * target and at the instruction following the branch.  Also marks the block
 * containing a 'throw' as out-of-line so it can be moved out of the hot path.
 */
7296 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7298 unsigned char *ip = start;
7299 unsigned char *target;
7302 MonoBasicBlock *bblock;
7303 const MonoOpcode *opcode;
7306 cli_addr = ip - start;
7307 i = mono_opcode_value ((const guint8 **)&ip, end);
7310 opcode = &mono_opcodes [i];
/* Advance IP past the operand; only branch operands create new bblocks */
7311 switch (opcode->argument) {
7312 case MonoInlineNone:
7315 case MonoInlineString:
7316 case MonoInlineType:
7317 case MonoInlineField:
7318 case MonoInlineMethod:
7321 case MonoShortInlineR:
7328 case MonoShortInlineVar:
7329 case MonoShortInlineI:
7332 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
7333 target = start + cli_addr + 2 + (signed char)ip [1];
7334 GET_BBLOCK (cfg, bblock, target);
7337 GET_BBLOCK (cfg, bblock, ip);
7339 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
7340 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7341 GET_BBLOCK (cfg, bblock, target);
7344 GET_BBLOCK (cfg, bblock, ip);
7346 case MonoInlineSwitch: {
7347 guint32 n = read32 (ip + 1);
/* The switch targets are relative to the end of the whole jump table */
7350 cli_addr += 5 + 4 * n;
7351 target = start + cli_addr;
7352 GET_BBLOCK (cfg, bblock, target);
7354 for (j = 0; j < n; ++j) {
7355 target = start + cli_addr + (gint32)read32 (ip);
7356 GET_BBLOCK (cfg, bblock, target);
7366 g_assert_not_reached ();
7369 if (i == CEE_THROW) {
7370 unsigned char *bb_start = ip - 1;
7372 /* Find the start of the bblock containing the throw */
7374 while ((bb_start >= start) && !bblock) {
7375 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the main code path */
7379 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN in the context of M, allowing
 * open constructed types.  For wrapper methods the token indexes the
 * wrapper's data, and the result is inflated with CONTEXT; otherwise the
 * token is looked up in the method's image.  Errors are reported via ERROR.
 */
7389 static inline MonoMethod *
7390 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7394 mono_error_init (error);
7396 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7397 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7399 method = mono_class_inflate_generic_method_checked (method, context, error);
7402 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally rejects methods on
 * open constructed types unless the compilation is gshared, recording a
 * bad-image error in cfg->error.  CFG may be NULL (e.g. when called from
 * initialize_array_data ()); in that case a local error is swallowed.
 */
7408 static inline MonoMethod *
7409 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7412 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7414 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7415 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7419 if (!method && !cfg)
7420 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN in the context of METHOD.  For
 * wrappers the token indexes the wrapper data and the class is inflated with
 * CONTEXT; otherwise the token (possibly a typespec) is resolved against the
 * method's image.  The resulting class is initialized before returning.
 */
7425 static inline MonoClass*
7426 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7431 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7432 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7434 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7435 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7438 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7439 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7442 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve the standalone method signature referenced by TOKEN in the
 * context of METHOD (wrapper data for wrappers, metadata parse otherwise)
 * and inflate it with CONTEXT.  Errors are reported via ERROR; returns NULL
 * on parse failure.
 */
7446 static inline MonoMethodSignature*
7447 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7449 MonoMethodSignature *fsig;
7451 mono_error_init (error);
7452 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7453 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7455 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7456 return_val_if_nok (error, NULL);
7459 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the managed
 * SecurityManager.ThrowException(exception) helper method used by the
 * CoreCLR security checks below.
 */
7465 throw_exception (void)
7467 static MonoMethod *method = NULL;
7470 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7471 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () which throws the
 * pre-constructed exception object EX at runtime.
 */
7478 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7480 MonoMethod *thrower = throw_exception ();
7483 EMIT_NEW_PCONST (cfg, args [0], ex);
7484 mono_emit_method_call (cfg, thrower, args, NULL);
7488 * Return the original method if a wrapper is specified. We can only access
7489 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Return the wrapped method when METHOD is a marshalling wrapper, since
 * custom attributes (needed for the CoreCLR security level) are only
 * available on the original method.  Native-to-managed wrappers are returned
 * as-is.
 */
7492 get_original_method (MonoMethod *method)
7494 if (method->wrapper_type == MONO_WRAPPER_NONE)
7497 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7498 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7501 /* in other cases we need to find the original method */
7502 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the corresponding security exception at runtime.
 */
7506 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7508 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7509 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7511 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the corresponding security exception at runtime.
 */
7515 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7517 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7518 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7520 emit_throw_exception (cfg, ex);
7524 * Check that the IL instructions at ip are the array initialization
7525 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence following a 'newarr' (dup; ldtoken <field>;
 * call RuntimeHelpers::InitializeArray) and, when it matches and the element
 * type is a simple little-endian-safe primitive, return a pointer to the
 * static field data so the array can be initialized with a memcpy instead of
 * the call.  *OUT_SIZE receives the element size and *OUT_FIELD_TOKEN the
 * field token.  For AOT the field RVA is returned instead and the actual
 * lookup happens at load time.
 */
7528 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7531 * newarr[System.Int32]
7533 * ldtoken field valuetype ...
7534 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7536 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7538 guint32 token = read32 (ip + 7);
7539 guint32 field_token = read32 (ip + 2);
7540 guint32 field_index = field_token & 0xffffff;
7542 const char *data_ptr;
7544 MonoMethod *cmethod;
7545 MonoClass *dummy_class;
7546 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7550 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7554 *out_field_token = field_token;
7556 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only RuntimeHelpers.InitializeArray from corlib qualifies for this fast path */
7559 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7561 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7562 case MONO_TYPE_BOOLEAN:
7566 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7567 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7568 case MONO_TYPE_CHAR:
/* Reject if the requested copy would overrun the field's declared size */
7585 if (size > mono_type_size (field->type, &dummy_align))
7588 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7589 if (!image_is_dynamic (method->klass->image)) {
7590 field_index = read32 (ip + 2) & 0xffffff;
7591 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7592 data_ptr = mono_image_rva_map (method->klass->image, rva);
7593 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7594 /* for aot code we do the lookup on load */
7595 if (aot && data_ptr)
7596 return (const char *)GUINT_TO_POINTER (rva);
7598 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7600 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP in METHOD, including a disassembly of the offending instruction (or a
 * note that the body could not be parsed / is empty).  The header is queued
 * on headers_to_free rather than freed immediately.
 */
7608 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7611 char *method_fname = mono_method_full_name (method, TRUE);
7613 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7616 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7617 mono_error_cleanup (&error);
7618 } else if (header->code_size == 0)
7619 method_code = g_strdup ("method body is empty.");
7621 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7622 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7623 g_free (method_fname);
7624 g_free (method_code);
7625 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into IL local N.  When the
 * value is a constant that was just emitted as the last instruction, the
 * reg-reg move is optimized away by retargeting the constant's dreg to the
 * local's register directly.
 */
7629 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7632 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7633 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7634 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7635 /* Optimize reg-reg moves away */
7637 * Can't optimize other opcodes, since sp[0] might point to
7638 * the last ins of a decomposed opcode.
7640 sp [0]->dreg = (cfg)->locals [n]->dreg;
7642 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7647 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases, e.g. LDLOCA immediately followed by INITOBJ on the same local. */
7650 static inline unsigned char *
7651 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* The local index is encoded after the (1- or 2-byte) ldloca opcode;
 * NOTE(review): the short-form (read8) path is missing from this excerpt. */
7661 local = read16 (ip + 2);
/* Peephole: ldloca.N; initobj <type>  =>  direct zero-init of the local,
 * provided the initobj is in the same basic block. */
7665 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7666 /* From the INITOBJ case */
7667 token = read32 (ip + 2);
7668 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7669 CHECK_TYPELOAD (klass);
7670 type = mini_get_underlying_type (&klass->byval_arg);
7671 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode, where
 * vtable and IMT slots hold function descriptors (addr + arg pairs) rather
 * than raw code addresses or trampolines. Four strategies are used, tried in
 * order: plain virtual call, simple interface call, generic-virtual /
 * variant-interface call through a dynamically grown IMT thunk, and a fully
 * generic slow path (also used for gsharedvt).
 *   NOTE(review): a number of lines (braces, some declarations such as
 * `slot`/`offset`, and #ifdef guards) are missing from this excerpt; comments
 * describe only the visible code.
 */
7679 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7681 MonoInst *icall_args [16];
7682 MonoInst *call_target, *ins, *vtable_ins;
7683 int arg_reg, this_reg, vtable_reg;
7684 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7685 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7686 gboolean variant_iface = FALSE;
7691 * In llvm-only mode, vtables contain function descriptors instead of
7692 * method addresses/trampolines.
/* Explicit null check on `this`, since the call itself is indirect. */
7694 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use an IMT slot; class methods use a vtable index. */
7697 slot = mono_method_get_imt_slot (cmethod);
7699 slot = mono_method_get_vtable_index (cmethod);
7701 this_reg = sp [0]->dreg;
7703 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7704 variant_iface = TRUE;
/* Case 1: ordinary (non-generic, non-interface, non-gsharedvt) virtual call. */
7706 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7708 * The simplest case, a normal virtual call.
7710 int slot_reg = alloc_preg (cfg);
7711 int addr_reg = alloc_preg (cfg);
7712 int arg_reg = alloc_preg (cfg);
7713 MonoBasicBlock *non_null_bb;
7715 vtable_reg = alloc_preg (cfg);
7716 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7717 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7719 /* Load the vtable slot, which contains a function descriptor. */
7720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7722 NEW_BBLOCK (cfg, non_null_bb);
/* Slot may be lazily initialized: branch to the fast path if non-null. */
7724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7725 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7729 // FIXME: Make the wrapper use the preserveall cconv
7730 // FIXME: Use one icall per slot for small slot numbers ?
/* Slow path: initialize the slot via an icall, reusing slot_reg for its result. */
7731 icall_args [0] = vtable_ins;
7732 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7733 /* Make the icall return the vtable slot value to save some code space */
7734 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7735 ins->dreg = slot_reg;
7736 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7739 MONO_START_BB (cfg, non_null_bb);
7740 /* Load the address + arg from the vtable slot */
7741 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7744 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (invariant, non-generic) interface call through an IMT slot. */
7747 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7749 * A simple interface call
7751 * We make a call through an imt slot to obtain the function descriptor we need to call.
7752 * The imt slot contains a function descriptor for a runtime function + arg.
7754 int slot_reg = alloc_preg (cfg);
7755 int addr_reg = alloc_preg (cfg);
7756 int arg_reg = alloc_preg (cfg);
7757 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7759 vtable_reg = alloc_preg (cfg);
7760 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable. */
7761 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7764 * The slot is already initialized when the vtable is created so there is no need
7768 /* Load the imt slot, which contains a function descriptor. */
7769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7771 /* Load the address + arg of the imt thunk from the imt slot */
7772 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7773 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7775 * IMT thunks in llvm-only mode are C functions which take an info argument
7776 * plus the imt method and return the ftndesc to call.
7778 icall_args [0] = thunk_arg_ins;
7779 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7780 cmethod, MONO_RGCTX_INFO_METHOD);
7781 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7783 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual or variant interface call — thunk may not know the
 * instantiation yet, so a slow-path fallback block is emitted. */
7786 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7788 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7789 * dynamically extended as more instantiations are discovered.
7790 * This handles generic virtual methods both on classes and interfaces.
7792 int slot_reg = alloc_preg (cfg);
7793 int addr_reg = alloc_preg (cfg);
7794 int arg_reg = alloc_preg (cfg);
7795 int ftndesc_reg = alloc_preg (cfg);
7796 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7797 MonoBasicBlock *slowpath_bb, *end_bb;
7799 NEW_BBLOCK (cfg, slowpath_bb);
7800 NEW_BBLOCK (cfg, end_bb);
7802 vtable_reg = alloc_preg (cfg);
7803 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT offset for interface methods, vtable offset for class methods. */
7805 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7807 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7809 /* Load the slot, which contains a function descriptor. */
7810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7812 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7813 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7814 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7815 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7818 /* Same as with iface calls */
7819 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7820 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7821 icall_args [0] = thunk_arg_ins;
7822 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7823 cmethod, MONO_RGCTX_INFO_METHOD);
7824 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7825 ftndesc_ins->dreg = ftndesc_reg;
7827 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7828 * they don't know about yet. Fall back to the slowpath in that case.
7830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target via a runtime icall. */
7836 MONO_START_BB (cfg, slowpath_bb);
7837 icall_args [0] = vtable_ins;
7838 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7839 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7840 cmethod, MONO_RGCTX_INFO_METHOD);
7842 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7844 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7845 ftndesc_ins->dreg = ftndesc_reg;
7846 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7849 MONO_START_BB (cfg, end_bb);
7850 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7854 * Non-optimized cases
/* Case 4 (gsharedvt): resolve both target and extra arg through an icall;
 * icall_args [3] is an out-parameter receiving the extra argument. */
7856 icall_args [0] = sp [0];
7857 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7859 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7860 cmethod, MONO_RGCTX_INFO_METHOD);
7862 arg_reg = alloc_preg (cfg);
7863 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7864 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7866 g_assert (is_gsharedvt);
7868 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7870 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7873 * Pass the extra argument even if the callee doesn't receive it, most
7874 * calling conventions allow this.
7876 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain. NOTE(review): the loop construct and return statements
 * are missing from this excerpt.
 */
7880 is_exception_class (MonoClass *klass)
7883 if (klass == mono_defaults.exception_class)
7885 klass = klass->parent;
7891 * is_jit_optimizer_disabled:
7893 * Determine whenever M's assembly has a DebuggableAttribute with the
7894 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly; the memory barriers below publish the
 * cached value before the `inited` flag so racing readers never see an
 * uninitialized value. */
7897 is_jit_optimizer_disabled (MonoMethod *m)
7900 MonoAssembly *ass = m->klass->image->assembly;
7901 MonoCustomAttrInfo* attrs;
7904 gboolean val = FALSE;
/* Fast path: use the cached per-assembly answer. */
7907 if (ass->jit_optimizer_disabled_inited)
7908 return ass->jit_optimizer_disabled;
7910 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class unavailable: cache FALSE and bail out. */
7914 ass->jit_optimizer_disabled = FALSE;
7915 mono_memory_barrier ();
7916 ass->jit_optimizer_disabled_inited = TRUE;
7920 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7921 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7923 for (i = 0; i < attrs->num_attrs; ++i) {
7924 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7926 MonoMethodSignature *sig;
7928 if (!attr->ctor || attr->ctor->klass != klass)
7930 /* Decode the attribute. See reflection.c */
7931 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3). */
7932 g_assert (read16 (p) == 0x0001);
7935 // FIXME: Support named parameters
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded here. */
7936 sig = mono_method_signature (attr->ctor);
7937 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7939 /* Two boolean arguments */
7943 mono_custom_attrs_free (attrs);
/* Publish the computed value, then the inited flag (release ordering). */
7946 ass->jit_optimizer_disabled = val;
7947 mono_memory_barrier ();
7948 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG and
 * IL opcode CALL_OPCODE can be compiled as a real tail call. Starts from the
 * architecture's answer, then vetoes any case where the callee could observe
 * a pointer into the caller's (about-to-be-destroyed) stack frame, or where
 * runtime bookkeeping (LMF, wrappers, pinvoke) prevents it.
 */
7954 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7956 gboolean supported_tail_call;
7959 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7961 for (i = 0; i < fsig->param_count; ++i) {
7962 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7963 /* These can point to the current method's stack */
7964 supported_tail_call = FALSE;
7966 if (fsig->hasthis && cmethod->klass->valuetype)
7967 /* this might point to the current method's stack */
7968 supported_tail_call = FALSE;
7969 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7970 supported_tail_call = FALSE;
/* An LMF frame must stay live for the duration of the call. */
7971 if (cfg->method->save_lmf)
7972 supported_tail_call = FALSE;
7973 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7974 supported_tail_call = FALSE;
/* Only plain CEE_CALL is supported (no callvirt/calli tail calls here). */
7975 if (call_opcode != CEE_CALL)
7976 supported_tail_call = FALSE;
7978 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related failures. */
7980 if (supported_tail_call) {
7981 if (!mono_debug_count ())
7982 supported_tail_call = FALSE;
7986 return supported_tail_call;
7992 * Handle calls made to ctors from NEWOBJ opcodes.
/* Emits the ctor invocation for a NEWOBJ: picks between an intrinsic,
 * inlining, a gsharedvt indirect call, a generic-sharing indirect call, and
 * a plain direct call. SP holds the already-evaluated arguments (sp [0] is
 * the freshly allocated object). NOTE(review): several lines (declarations,
 * braces, #ifdef guards) are missing from this excerpt. */
7995 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7996 MonoInst **sp, guint8 *ip, int *inline_costs)
7998 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an extra vtable/mrgctx argument. */
8000 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8001 mono_method_is_generic_sharable (cmethod, TRUE)) {
8002 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8003 mono_class_vtable (cfg->domain, cmethod->klass);
8004 CHECK_TYPELOAD (cmethod->klass);
8006 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8007 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8010 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8011 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Not shared: the vtable is known at compile time, embed it as a constant. */
8013 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8015 CHECK_TYPELOAD (cmethod->klass);
8016 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8021 /* Avoid virtual calls to ctors if possible */
8022 if (mono_class_is_marshalbyref (cmethod->klass))
8023 callvirt_this_arg = sp [0];
/* Option 1: a JIT intrinsic replaces the ctor call entirely. */
8025 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8026 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8027 CHECK_CFG_EXCEPTION;
/* Option 2: inline the ctor body (not for exception subclasses, whose ctors
 * capture stack traces and must see a real call frame). */
8028 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8029 mono_method_check_inlining (cfg, cmethod) &&
8030 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8033 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8034 cfg->real_offset += 5;
8036 *inline_costs += costs - 5;
/* Inlining failed: fall back to an ordinary call. */
8038 INLINE_FAILURE ("inline failure");
8039 // FIXME-VT: Clean this up
8040 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8041 GSHAREDVT_FAILURE(*ip);
8042 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* Option 3: gsharedvt signature — go through the out trampoline. */
8044 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8047 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8049 if (cfg->llvm_only) {
8050 // FIXME: Avoid initializing vtable_arg
8051 emit_llvmonly_calli (cfg, fsig, sp, addr);
8053 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Option 4: generic-sharing context requires an indirect call. */
8055 } else if (context_used &&
8056 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8057 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8058 MonoInst *cmethod_addr;
8060 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8062 if (cfg->llvm_only) {
8063 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8064 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8065 emit_llvmonly_calli (cfg, fsig, sp, addr);
8067 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8068 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8070 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Option 5: plain direct ctor call. */
8073 INLINE_FAILURE ("ctor call");
8074 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8075 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR that makes VAL the return value of the method being compiled.
 * Valuetype returns (CEE_STOBJ) are stored either into the return variable or
 * through the hidden vret address argument; scalar returns go through the
 * architecture backend, with a soft-float conversion for R4 when needed.
 *   NOTE(review): some branches/braces and the `iargs` setup line are missing
 * from this excerpt.
 */
8082 emit_setret (MonoCompile *cfg, MonoInst *val)
8084 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: cannot go through a register. */
8087 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8090 if (!cfg->vret_addr) {
8091 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it. */
8093 EMIT_NEW_RETLOADA (cfg, ret_addr);
8095 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8096 ins->klass = mono_class_from_mono_type (ret_type);
8099 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets return R4 via an icall conversion of the R8 value. */
8100 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8101 MonoInst *iargs [1];
8105 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8106 mono_arch_emit_setret (cfg, cfg->method, conv);
8108 mono_arch_emit_setret (cfg, cfg->method, val);
8111 mono_arch_emit_setret (cfg, cfg->method, val);
8117 * mono_method_to_ir:
8119 * Translate the .net IL into linear IR.
8122 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8123 MonoInst *return_var, MonoInst **inline_args,
8124 guint inline_offset, gboolean is_virtual_call)
8127 MonoInst *ins, **sp, **stack_start;
8128 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8129 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8130 MonoMethod *cmethod, *method_definition;
8131 MonoInst **arg_array;
8132 MonoMethodHeader *header;
8134 guint32 token, ins_flag;
8136 MonoClass *constrained_class = NULL;
8137 unsigned char *ip, *end, *target, *err_pos;
8138 MonoMethodSignature *sig;
8139 MonoGenericContext *generic_context = NULL;
8140 MonoGenericContainer *generic_container = NULL;
8141 MonoType **param_types;
8142 int i, n, start_new_bblock, dreg;
8143 int num_calls = 0, inline_costs = 0;
8144 int breakpoint_id = 0;
8146 GSList *class_inits = NULL;
8147 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8149 gboolean init_locals, seq_points, skip_dead_blocks;
8150 gboolean sym_seq_points = FALSE;
8151 MonoDebugMethodInfo *minfo;
8152 MonoBitSet *seq_point_locs = NULL;
8153 MonoBitSet *seq_point_set_locs = NULL;
8155 cfg->disable_inline = is_jit_optimizer_disabled (method);
8157 /* serialization and xdomain stuff may need access to private fields and methods */
8158 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8159 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8160 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8161 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8162 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8163 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8165 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8166 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8167 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8168 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8169 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8171 image = method->klass->image;
8172 header = mono_method_get_header_checked (method, &cfg->error);
8174 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8175 goto exception_exit;
8177 generic_container = mono_method_get_generic_container (method);
8178 sig = mono_method_signature (method);
8179 num_args = sig->hasthis + sig->param_count;
8180 ip = (unsigned char*)header->code;
8181 cfg->cil_start = ip;
8182 end = ip + header->code_size;
8183 cfg->stat_cil_code_size += header->code_size;
8185 seq_points = cfg->gen_seq_points && cfg->method == method;
8187 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8188 /* We could hit a seq point before attaching to the JIT (#8338) */
8192 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8193 minfo = mono_debug_lookup_method (method);
8195 MonoSymSeqPoint *sps;
8196 int i, n_il_offsets;
8198 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8199 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8200 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8201 sym_seq_points = TRUE;
8202 for (i = 0; i < n_il_offsets; ++i) {
8203 if (sps [i].il_offset < header->code_size)
8204 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8207 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8208 /* Methods without line number info like auto-generated property accessors */
8209 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8210 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8211 sym_seq_points = TRUE;
8216 * Methods without init_locals set could cause asserts in various passes
8217 * (#497220). To work around this, we emit dummy initialization opcodes
8218 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8219 * on some platforms.
8221 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8222 init_locals = header->init_locals;
8226 method_definition = method;
8227 while (method_definition->is_inflated) {
8228 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8229 method_definition = imethod->declaring;
8232 /* SkipVerification is not allowed if core-clr is enabled */
8233 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8235 dont_verify_stloc = TRUE;
8238 if (sig->is_inflated)
8239 generic_context = mono_method_get_context (method);
8240 else if (generic_container)
8241 generic_context = &generic_container->context;
8242 cfg->generic_context = generic_context;
8245 g_assert (!sig->has_type_parameters);
8247 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8248 g_assert (method->is_inflated);
8249 g_assert (mono_method_get_context (method)->method_inst);
8251 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8252 g_assert (sig->generic_param_count);
8254 if (cfg->method == method) {
8255 cfg->real_offset = 0;
8257 cfg->real_offset = inline_offset;
8260 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8261 cfg->cil_offset_to_bb_len = header->code_size;
8263 cfg->current_method = method;
8265 if (cfg->verbose_level > 2)
8266 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8268 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8270 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8271 for (n = 0; n < sig->param_count; ++n)
8272 param_types [n + sig->hasthis] = sig->params [n];
8273 cfg->arg_types = param_types;
8275 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8276 if (cfg->method == method) {
8278 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8279 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8282 NEW_BBLOCK (cfg, start_bblock);
8283 cfg->bb_entry = start_bblock;
8284 start_bblock->cil_code = NULL;
8285 start_bblock->cil_length = 0;
8288 NEW_BBLOCK (cfg, end_bblock);
8289 cfg->bb_exit = end_bblock;
8290 end_bblock->cil_code = NULL;
8291 end_bblock->cil_length = 0;
8292 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8293 g_assert (cfg->num_bblocks == 2);
8295 arg_array = cfg->args;
8297 if (header->num_clauses) {
8298 cfg->spvars = g_hash_table_new (NULL, NULL);
8299 cfg->exvars = g_hash_table_new (NULL, NULL);
8301 /* handle exception clauses */
8302 for (i = 0; i < header->num_clauses; ++i) {
8303 MonoBasicBlock *try_bb;
8304 MonoExceptionClause *clause = &header->clauses [i];
8305 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8307 try_bb->real_offset = clause->try_offset;
8308 try_bb->try_start = TRUE;
8309 try_bb->region = ((i + 1) << 8) | clause->flags;
8310 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8311 tblock->real_offset = clause->handler_offset;
8312 tblock->flags |= BB_EXCEPTION_HANDLER;
8315 * Linking the try block with the EH block hinders inlining as we won't be able to
8316 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8318 if (COMPILE_LLVM (cfg))
8319 link_bblock (cfg, try_bb, tblock);
8321 if (*(ip + clause->handler_offset) == CEE_POP)
8322 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8324 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8325 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8326 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8327 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8328 MONO_ADD_INS (tblock, ins);
8330 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8331 /* finally clauses already have a seq point */
8332 /* seq points for filter clauses are emitted below */
8333 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8334 MONO_ADD_INS (tblock, ins);
8337 /* todo: is a fault block unsafe to optimize? */
8338 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8339 tblock->flags |= BB_EXCEPTION_UNSAFE;
8342 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8344 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8346 /* catch and filter blocks get the exception object on the stack */
8347 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8348 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8350 /* mostly like handle_stack_args (), but just sets the input args */
8351 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8352 tblock->in_scount = 1;
8353 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8354 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8358 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8359 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8360 if (!cfg->compile_llvm) {
8361 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8362 ins->dreg = tblock->in_stack [0]->dreg;
8363 MONO_ADD_INS (tblock, ins);
8366 MonoInst *dummy_use;
8369 * Add a dummy use for the exvar so its liveness info will be
8372 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8375 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8376 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8377 MONO_ADD_INS (tblock, ins);
8380 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8381 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8382 tblock->flags |= BB_EXCEPTION_HANDLER;
8383 tblock->real_offset = clause->data.filter_offset;
8384 tblock->in_scount = 1;
8385 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8386 /* The filter block shares the exvar with the handler block */
8387 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8388 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8389 MONO_ADD_INS (tblock, ins);
8393 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8394 clause->data.catch_class &&
8396 mono_class_check_context_used (clause->data.catch_class)) {
8398 * In shared generic code with catch
8399 * clauses containing type variables
8400 * the exception handling code has to
8401 * be able to get to the rgctx.
8402 * Therefore we have to make sure that
8403 * the vtable/mrgctx argument (for
8404 * static or generic methods) or the
8405 * "this" argument (for non-static
8406 * methods) are live.
8408 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8409 mini_method_get_context (method)->method_inst ||
8410 method->klass->valuetype) {
8411 mono_get_vtable_var (cfg);
8413 MonoInst *dummy_use;
8415 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8420 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8421 cfg->cbb = start_bblock;
8422 cfg->args = arg_array;
8423 mono_save_args (cfg, sig, inline_args);
8426 /* FIRST CODE BLOCK */
8427 NEW_BBLOCK (cfg, tblock);
8428 tblock->cil_code = ip;
8432 ADD_BBLOCK (cfg, tblock);
8434 if (cfg->method == method) {
8435 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8436 if (breakpoint_id) {
8437 MONO_INST_NEW (cfg, ins, OP_BREAK);
8438 MONO_ADD_INS (cfg->cbb, ins);
8442 /* we use a separate basic block for the initialization code */
8443 NEW_BBLOCK (cfg, init_localsbb);
8444 cfg->bb_init = init_localsbb;
8445 init_localsbb->real_offset = cfg->real_offset;
8446 start_bblock->next_bb = init_localsbb;
8447 init_localsbb->next_bb = cfg->cbb;
8448 link_bblock (cfg, start_bblock, init_localsbb);
8449 link_bblock (cfg, init_localsbb, cfg->cbb);
8451 cfg->cbb = init_localsbb;
8453 if (cfg->gsharedvt && cfg->method == method) {
8454 MonoGSharedVtMethodInfo *info;
8455 MonoInst *var, *locals_var;
8458 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8459 info->method = cfg->method;
8460 info->count_entries = 16;
8461 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8462 cfg->gsharedvt_info = info;
8464 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8465 /* prevent it from being register allocated */
8466 //var->flags |= MONO_INST_VOLATILE;
8467 cfg->gsharedvt_info_var = var;
8469 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8472 /* Allocate locals */
8473 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8474 /* prevent it from being register allocated */
8475 //locals_var->flags |= MONO_INST_VOLATILE;
8476 cfg->gsharedvt_locals_var = locals_var;
8478 dreg = alloc_ireg (cfg);
8479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8481 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8482 ins->dreg = locals_var->dreg;
8484 MONO_ADD_INS (cfg->cbb, ins);
8485 cfg->gsharedvt_locals_var_ins = ins;
8487 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8490 ins->flags |= MONO_INST_INIT;
8494 if (mono_security_core_clr_enabled ()) {
8495 /* check if this is native code, e.g. an icall or a p/invoke */
8496 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8497 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8499 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8500 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8502 /* if this ia a native call then it can only be JITted from platform code */
8503 if ((icall || pinvk) && method->klass && method->klass->image) {
8504 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8505 MonoException *ex = icall ? mono_get_exception_security () :
8506 mono_get_exception_method_access ();
8507 emit_throw_exception (cfg, ex);
8514 CHECK_CFG_EXCEPTION;
8516 if (header->code_size == 0)
8519 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8524 if (cfg->method == method)
8525 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8527 for (n = 0; n < header->num_locals; ++n) {
8528 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8533 /* We force the vtable variable here for all shared methods
8534 for the possibility that they might show up in a stack
8535 trace where their exact instantiation is needed. */
8536 if (cfg->gshared && method == cfg->method) {
8537 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8538 mini_method_get_context (method)->method_inst ||
8539 method->klass->valuetype) {
8540 mono_get_vtable_var (cfg);
8542 /* FIXME: Is there a better way to do this?
8543 We need the variable live for the duration
8544 of the whole method. */
8545 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8549 /* add a check for this != NULL to inlined methods */
8550 if (is_virtual_call) {
8553 NEW_ARGLOAD (cfg, arg_ins, 0);
8554 MONO_ADD_INS (cfg->cbb, arg_ins);
8555 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8558 skip_dead_blocks = !dont_verify;
8559 if (skip_dead_blocks) {
8560 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8565 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8566 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8569 start_new_bblock = 0;
8571 if (cfg->method == method)
8572 cfg->real_offset = ip - header->code;
8574 cfg->real_offset = inline_offset;
8579 if (start_new_bblock) {
8580 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8581 if (start_new_bblock == 2) {
8582 g_assert (ip == tblock->cil_code);
8584 GET_BBLOCK (cfg, tblock, ip);
8586 cfg->cbb->next_bb = tblock;
8588 start_new_bblock = 0;
8589 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8590 if (cfg->verbose_level > 3)
8591 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8592 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8596 g_slist_free (class_inits);
8599 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8600 link_bblock (cfg, cfg->cbb, tblock);
8601 if (sp != stack_start) {
8602 handle_stack_args (cfg, stack_start, sp - stack_start);
8604 CHECK_UNVERIFIABLE (cfg);
8606 cfg->cbb->next_bb = tblock;
8608 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8609 if (cfg->verbose_level > 3)
8610 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8611 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8614 g_slist_free (class_inits);
8619 if (skip_dead_blocks) {
8620 int ip_offset = ip - header->code;
8622 if (ip_offset == bb->end)
8626 int op_size = mono_opcode_size (ip, end);
8627 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8629 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8631 if (ip_offset + op_size == bb->end) {
8632 MONO_INST_NEW (cfg, ins, OP_NOP);
8633 MONO_ADD_INS (cfg->cbb, ins);
8634 start_new_bblock = 1;
8642 * Sequence points are points where the debugger can place a breakpoint.
8643 * Currently, we generate these automatically at points where the IL
8646 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8648 * Make methods interruptable at the beginning, and at the targets of
8649 * backward branches.
8650 * Also, do this at the start of every bblock in methods with clauses too,
8651 * to be able to handle instructions with imprecise control flow like
8653 * Backward branches are handled at the end of method-to-ir ().
8655 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8656 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8658 /* Avoid sequence points on empty IL like .volatile */
8659 // FIXME: Enable this
8660 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8661 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8662 if ((sp != stack_start) && !sym_seq_point)
8663 ins->flags |= MONO_INST_NONEMPTY_STACK;
8664 MONO_ADD_INS (cfg->cbb, ins);
8667 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8670 cfg->cbb->real_offset = cfg->real_offset;
8672 if ((cfg->method == method) && cfg->coverage_info) {
8673 guint32 cil_offset = ip - header->code;
8674 cfg->coverage_info->data [cil_offset].cil_code = ip;
8676 /* TODO: Use an increment here */
8677 #if defined(TARGET_X86)
8678 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8679 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8681 MONO_ADD_INS (cfg->cbb, ins);
8683 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8688 if (cfg->verbose_level > 3)
8689 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8693 if (seq_points && !sym_seq_points && sp != stack_start) {
8695 * The C# compiler uses these nops to notify the JIT that it should
8696 * insert seq points.
8698 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8699 MONO_ADD_INS (cfg->cbb, ins);
8701 if (cfg->keep_cil_nops)
8702 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8704 MONO_INST_NEW (cfg, ins, OP_NOP);
8706 MONO_ADD_INS (cfg->cbb, ins);
8709 if (should_insert_brekpoint (cfg->method)) {
8710 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8712 MONO_INST_NEW (cfg, ins, OP_NOP);
8715 MONO_ADD_INS (cfg->cbb, ins);
8721 CHECK_STACK_OVF (1);
8722 n = (*ip)-CEE_LDARG_0;
8724 EMIT_NEW_ARGLOAD (cfg, ins, n);
8732 CHECK_STACK_OVF (1);
8733 n = (*ip)-CEE_LDLOC_0;
8735 EMIT_NEW_LOCLOAD (cfg, ins, n);
8744 n = (*ip)-CEE_STLOC_0;
8747 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8749 emit_stloc_ir (cfg, sp, header, n);
8756 CHECK_STACK_OVF (1);
8759 EMIT_NEW_ARGLOAD (cfg, ins, n);
8765 CHECK_STACK_OVF (1);
8768 NEW_ARGLOADA (cfg, ins, n);
8769 MONO_ADD_INS (cfg->cbb, ins);
8779 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8781 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8786 CHECK_STACK_OVF (1);
8789 EMIT_NEW_LOCLOAD (cfg, ins, n);
8793 case CEE_LDLOCA_S: {
8794 unsigned char *tmp_ip;
8796 CHECK_STACK_OVF (1);
8797 CHECK_LOCAL (ip [1]);
8799 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8805 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8814 CHECK_LOCAL (ip [1]);
8815 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8817 emit_stloc_ir (cfg, sp, header, ip [1]);
8822 CHECK_STACK_OVF (1);
8823 EMIT_NEW_PCONST (cfg, ins, NULL);
8824 ins->type = STACK_OBJ;
8829 CHECK_STACK_OVF (1);
8830 EMIT_NEW_ICONST (cfg, ins, -1);
8843 CHECK_STACK_OVF (1);
8844 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8850 CHECK_STACK_OVF (1);
8852 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8858 CHECK_STACK_OVF (1);
8859 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8865 CHECK_STACK_OVF (1);
8866 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8867 ins->type = STACK_I8;
8868 ins->dreg = alloc_dreg (cfg, STACK_I8);
8870 ins->inst_l = (gint64)read64 (ip);
8871 MONO_ADD_INS (cfg->cbb, ins);
8877 gboolean use_aotconst = FALSE;
8879 #ifdef TARGET_POWERPC
8880 /* FIXME: Clean this up */
8881 if (cfg->compile_aot)
8882 use_aotconst = TRUE;
8885 /* FIXME: we should really allocate this only late in the compilation process */
8886 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8888 CHECK_STACK_OVF (1);
8894 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8896 dreg = alloc_freg (cfg);
8897 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8898 ins->type = cfg->r4_stack_type;
8900 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8901 ins->type = cfg->r4_stack_type;
8902 ins->dreg = alloc_dreg (cfg, STACK_R8);
8904 MONO_ADD_INS (cfg->cbb, ins);
8914 gboolean use_aotconst = FALSE;
8916 #ifdef TARGET_POWERPC
8917 /* FIXME: Clean this up */
8918 if (cfg->compile_aot)
8919 use_aotconst = TRUE;
8922 /* FIXME: we should really allocate this only late in the compilation process */
8923 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8925 CHECK_STACK_OVF (1);
8931 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8933 dreg = alloc_freg (cfg);
8934 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8935 ins->type = STACK_R8;
8937 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8938 ins->type = STACK_R8;
8939 ins->dreg = alloc_dreg (cfg, STACK_R8);
8941 MONO_ADD_INS (cfg->cbb, ins);
8950 MonoInst *temp, *store;
8952 CHECK_STACK_OVF (1);
8956 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8957 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8959 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8962 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8975 if (sp [0]->type == STACK_R8)
8976 /* we need to pop the value from the x86 FP stack */
8977 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8982 MonoMethodSignature *fsig;
8985 INLINE_FAILURE ("jmp");
8986 GSHAREDVT_FAILURE (*ip);
8989 if (stack_start != sp)
8991 token = read32 (ip + 1);
8992 /* FIXME: check the signature matches */
8993 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8996 if (cfg->gshared && mono_method_check_context_used (cmethod))
8997 GENERIC_SHARING_FAILURE (CEE_JMP);
8999 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9001 fsig = mono_method_signature (cmethod);
9002 n = fsig->param_count + fsig->hasthis;
9003 if (cfg->llvm_only) {
9006 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9007 for (i = 0; i < n; ++i)
9008 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9009 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9011 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9012 * have to emit a normal return since llvm expects it.
9015 emit_setret (cfg, ins);
9016 MONO_INST_NEW (cfg, ins, OP_BR);
9017 ins->inst_target_bb = end_bblock;
9018 MONO_ADD_INS (cfg->cbb, ins);
9019 link_bblock (cfg, cfg->cbb, end_bblock);
9022 } else if (cfg->backend->have_op_tail_call) {
9023 /* Handle tail calls similarly to calls */
9026 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9027 call->method = cmethod;
9028 call->tail_call = TRUE;
9029 call->signature = mono_method_signature (cmethod);
9030 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9031 call->inst.inst_p0 = cmethod;
9032 for (i = 0; i < n; ++i)
9033 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9035 mono_arch_emit_call (cfg, call);
9036 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9037 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9039 for (i = 0; i < num_args; ++i)
9040 /* Prevent arguments from being optimized away */
9041 arg_array [i]->flags |= MONO_INST_VOLATILE;
9043 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9044 ins = (MonoInst*)call;
9045 ins->inst_p0 = cmethod;
9046 MONO_ADD_INS (cfg->cbb, ins);
9050 start_new_bblock = 1;
9055 MonoMethodSignature *fsig;
9058 token = read32 (ip + 1);
9062 //GSHAREDVT_FAILURE (*ip);
9067 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9070 if (method->dynamic && fsig->pinvoke) {
9074 * This is a call through a function pointer using a pinvoke
9075 * signature. Have to create a wrapper and call that instead.
9076 * FIXME: This is very slow, need to create a wrapper at JIT time
9077 * instead based on the signature.
9079 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9080 EMIT_NEW_PCONST (cfg, args [1], fsig);
9082 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9085 n = fsig->param_count + fsig->hasthis;
9089 //g_assert (!virtual_ || fsig->hasthis);
9093 inline_costs += 10 * num_calls++;
9096 * Making generic calls out of gsharedvt methods.
9097 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9098 * patching gshared method addresses into a gsharedvt method.
9100 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9102 * We pass the address to the gsharedvt trampoline in the rgctx reg
9104 MonoInst *callee = addr;
9106 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9108 GSHAREDVT_FAILURE (*ip);
9112 GSHAREDVT_FAILURE (*ip);
9114 addr = emit_get_rgctx_sig (cfg, context_used,
9115 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9116 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9120 /* Prevent inlining of methods with indirect calls */
9121 INLINE_FAILURE ("indirect call");
9123 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9124 MonoJumpInfoType info_type;
9128 * Instead of emitting an indirect call, emit a direct call
9129 * with the contents of the aotconst as the patch info.
9131 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9132 info_type = (MonoJumpInfoType)addr->inst_c1;
9133 info_data = addr->inst_p0;
9135 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9136 info_data = addr->inst_right->inst_left;
9139 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9140 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9143 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9144 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9149 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9153 /* End of call, INS should contain the result of the call, if any */
9155 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9157 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9160 CHECK_CFG_EXCEPTION;
9164 constrained_class = NULL;
9168 case CEE_CALLVIRT: {
9169 MonoInst *addr = NULL;
9170 MonoMethodSignature *fsig = NULL;
9172 int virtual_ = *ip == CEE_CALLVIRT;
9173 gboolean pass_imt_from_rgctx = FALSE;
9174 MonoInst *imt_arg = NULL;
9175 MonoInst *keep_this_alive = NULL;
9176 gboolean pass_vtable = FALSE;
9177 gboolean pass_mrgctx = FALSE;
9178 MonoInst *vtable_arg = NULL;
9179 gboolean check_this = FALSE;
9180 gboolean supported_tail_call = FALSE;
9181 gboolean tail_call = FALSE;
9182 gboolean need_seq_point = FALSE;
9183 guint32 call_opcode = *ip;
9184 gboolean emit_widen = TRUE;
9185 gboolean push_res = TRUE;
9186 gboolean skip_ret = FALSE;
9187 gboolean delegate_invoke = FALSE;
9188 gboolean direct_icall = FALSE;
9189 gboolean constrained_partial_call = FALSE;
9190 MonoMethod *cil_method;
9193 token = read32 (ip + 1);
9197 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9200 cil_method = cmethod;
9202 if (constrained_class) {
9203 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9204 if (!mini_is_gsharedvt_klass (constrained_class)) {
9205 g_assert (!cmethod->klass->valuetype);
9206 if (!mini_type_is_reference (&constrained_class->byval_arg))
9207 constrained_partial_call = TRUE;
9211 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9212 if (cfg->verbose_level > 2)
9213 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9214 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9215 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9217 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9221 if (cfg->verbose_level > 2)
9222 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9224 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9226 * This is needed since get_method_constrained can't find
9227 * the method in klass representing a type var.
9228 * The type var is guaranteed to be a reference type in this
9231 if (!mini_is_gsharedvt_klass (constrained_class))
9232 g_assert (!cmethod->klass->valuetype);
9234 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9240 if (!dont_verify && !cfg->skip_visibility) {
9241 MonoMethod *target_method = cil_method;
9242 if (method->is_inflated) {
9243 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9246 if (!mono_method_can_access_method (method_definition, target_method) &&
9247 !mono_method_can_access_method (method, cil_method))
9248 emit_method_access_failure (cfg, method, cil_method);
9251 if (mono_security_core_clr_enabled ())
9252 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9254 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9255 /* MS.NET seems to silently convert this to a callvirt */
9260 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9261 * converts to a callvirt.
9263 * tests/bug-515884.il is an example of this behavior
9265 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9266 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9267 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9271 if (!cmethod->klass->inited)
9272 if (!mono_class_init (cmethod->klass))
9273 TYPE_LOAD_ERROR (cmethod->klass);
9275 fsig = mono_method_signature (cmethod);
9278 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9279 mini_class_is_system_array (cmethod->klass)) {
9280 array_rank = cmethod->klass->rank;
9281 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9282 direct_icall = TRUE;
9283 } else if (fsig->pinvoke) {
9284 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9285 fsig = mono_method_signature (wrapper);
9286 } else if (constrained_class) {
9288 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9292 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9293 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9295 /* See code below */
9296 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9297 MonoBasicBlock *tbb;
9299 GET_BBLOCK (cfg, tbb, ip + 5);
9300 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9302 * We want to extend the try block to cover the call, but we can't do it if the
9303 * call is made directly since it's followed by an exception check.
9305 direct_icall = FALSE;
9309 mono_save_token_info (cfg, image, token, cil_method);
9311 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9312 need_seq_point = TRUE;
9314 /* Don't support calls made using type arguments for now */
9316 if (cfg->gsharedvt) {
9317 if (mini_is_gsharedvt_signature (fsig))
9318 GSHAREDVT_FAILURE (*ip);
9322 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9323 g_assert_not_reached ();
9325 n = fsig->param_count + fsig->hasthis;
9327 if (!cfg->gshared && cmethod->klass->generic_container)
9331 g_assert (!mono_method_check_context_used (cmethod));
9335 //g_assert (!virtual_ || fsig->hasthis);
9340 * We have the `constrained.' prefix opcode.
9342 if (constrained_class) {
9343 if (mini_is_gsharedvt_klass (constrained_class)) {
9344 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9345 /* The 'Own method' case below */
9346 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9347 /* 'The type parameter is instantiated as a reference type' case below. */
9349 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9350 CHECK_CFG_EXCEPTION;
9356 if (constrained_partial_call) {
9357 gboolean need_box = TRUE;
9360 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9361 * called method is not known at compile time either. The called method could end up being
9362 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9363 * to box the receiver.
9364 * A simple solution would be to box always and make a normal virtual call, but that would
9365 * be bad performance-wise.
9367 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9369 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9374 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9375 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9376 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9377 ins->klass = constrained_class;
9378 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9379 CHECK_CFG_EXCEPTION;
9380 } else if (need_box) {
9382 MonoBasicBlock *is_ref_bb, *end_bb;
9383 MonoInst *nonbox_call;
9386 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9388 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9389 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9391 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9393 NEW_BBLOCK (cfg, is_ref_bb);
9394 NEW_BBLOCK (cfg, end_bb);
9396 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9401 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9406 MONO_START_BB (cfg, is_ref_bb);
9407 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9408 ins->klass = constrained_class;
9409 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9410 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9412 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9414 MONO_START_BB (cfg, end_bb);
9417 nonbox_call->dreg = ins->dreg;
9420 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9421 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9422 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9425 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9427 * The type parameter is instantiated as a valuetype,
9428 * but that type doesn't override the method we're
9429 * calling, so we need to box `this'.
9431 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9432 ins->klass = constrained_class;
9433 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9434 CHECK_CFG_EXCEPTION;
9435 } else if (!constrained_class->valuetype) {
9436 int dreg = alloc_ireg_ref (cfg);
9439 * The type parameter is instantiated as a reference
9440 * type. We have a managed pointer on the stack, so
9441 * we need to dereference it here.
9443 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9444 ins->type = STACK_OBJ;
9447 if (cmethod->klass->valuetype) {
9450 /* Interface method */
9453 mono_class_setup_vtable (constrained_class);
9454 CHECK_TYPELOAD (constrained_class);
9455 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9457 TYPE_LOAD_ERROR (constrained_class);
9458 slot = mono_method_get_vtable_slot (cmethod);
9460 TYPE_LOAD_ERROR (cmethod->klass);
9461 cmethod = constrained_class->vtable [ioffset + slot];
9463 if (cmethod->klass == mono_defaults.enum_class) {
9464 /* Enum implements some interfaces, so treat this as the first case */
9465 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9466 ins->klass = constrained_class;
9467 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9468 CHECK_CFG_EXCEPTION;
9473 constrained_class = NULL;
9476 if (check_call_signature (cfg, fsig, sp))
9479 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9480 delegate_invoke = TRUE;
9482 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9483 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9484 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9492 * If the callee is a shared method, then its static cctor
9493 * might not get called after the call was patched.
9495 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9496 emit_class_init (cfg, cmethod->klass);
9497 CHECK_TYPELOAD (cmethod->klass);
9500 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9503 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9505 context_used = mini_method_check_context_used (cfg, cmethod);
9507 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9508 /* Generic method interface
9509 calls are resolved via a
9510 helper function and don't
9512 if (!cmethod_context || !cmethod_context->method_inst)
9513 pass_imt_from_rgctx = TRUE;
9517 * If a shared method calls another
9518 * shared method then the caller must
9519 * have a generic sharing context
9520 * because the magic trampoline
9521 * requires it. FIXME: We shouldn't
9522 * have to force the vtable/mrgctx
9523 * variable here. Instead there
9524 * should be a flag in the cfg to
9525 * request a generic sharing context.
9528 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9529 mono_get_vtable_var (cfg);
9534 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9536 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9538 CHECK_TYPELOAD (cmethod->klass);
9539 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9544 g_assert (!vtable_arg);
9546 if (!cfg->compile_aot) {
9548 * emit_get_rgctx_method () calls mono_class_vtable () so check
9549 * for type load errors before.
9551 mono_class_setup_vtable (cmethod->klass);
9552 CHECK_TYPELOAD (cmethod->klass);
9555 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9557 /* !marshalbyref is needed to properly handle generic methods + remoting */
9558 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9559 MONO_METHOD_IS_FINAL (cmethod)) &&
9560 !mono_class_is_marshalbyref (cmethod->klass)) {
9567 if (pass_imt_from_rgctx) {
9568 g_assert (!pass_vtable);
9570 imt_arg = emit_get_rgctx_method (cfg, context_used,
9571 cmethod, MONO_RGCTX_INFO_METHOD);
9575 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9577 /* Calling virtual generic methods */
9578 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9579 !(MONO_METHOD_IS_FINAL (cmethod) &&
9580 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9581 fsig->generic_param_count &&
9582 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9584 MonoInst *this_temp, *this_arg_temp, *store;
9585 MonoInst *iargs [4];
9587 g_assert (fsig->is_inflated);
9589 /* Prevent inlining of methods that contain indirect calls */
9590 INLINE_FAILURE ("virtual generic call");
9592 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9593 GSHAREDVT_FAILURE (*ip);
9595 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9596 g_assert (!imt_arg);
9598 g_assert (cmethod->is_inflated);
9599 imt_arg = emit_get_rgctx_method (cfg, context_used,
9600 cmethod, MONO_RGCTX_INFO_METHOD);
9601 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9603 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9604 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9605 MONO_ADD_INS (cfg->cbb, store);
9607 /* FIXME: This should be a managed pointer */
9608 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9610 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9611 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9612 cmethod, MONO_RGCTX_INFO_METHOD);
9613 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9614 addr = mono_emit_jit_icall (cfg,
9615 mono_helper_compile_generic_method, iargs);
9617 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9619 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9626 * Implement a workaround for the inherent races involved in locking:
9632 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9633 * try block, the Exit () won't be executed, see:
9634 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9635 * To work around this, we extend such try blocks to include the last x bytes
9636 * of the Monitor.Enter () call.
9638 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9639 MonoBasicBlock *tbb;
9641 GET_BBLOCK (cfg, tbb, ip + 5);
9643 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9644 * from Monitor.Enter like ArgumentNullException.
9646 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9647 /* Mark this bblock as needing to be extended */
9648 tbb->extend_try_block = TRUE;
9652 /* Conversion to a JIT intrinsic */
9653 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9654 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9655 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9663 if ((cfg->opt & MONO_OPT_INLINE) &&
9664 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9665 mono_method_check_inlining (cfg, cmethod)) {
9667 gboolean always = FALSE;
9669 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9670 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9671 /* Prevent inlining of methods that call wrappers */
9672 INLINE_FAILURE ("wrapper call");
9673 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9677 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9679 cfg->real_offset += 5;
9681 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9682 /* *sp is already set by inline_method */
9687 inline_costs += costs;
9693 /* Tail recursion elimination */
9694 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9695 gboolean has_vtargs = FALSE;
9698 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9699 INLINE_FAILURE ("tail call");
9701 /* keep it simple */
9702 for (i = fsig->param_count - 1; i >= 0; i--) {
9703 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9708 for (i = 0; i < n; ++i)
9709 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9710 MONO_INST_NEW (cfg, ins, OP_BR);
9711 MONO_ADD_INS (cfg->cbb, ins);
9712 tblock = start_bblock->out_bb [0];
9713 link_bblock (cfg, cfg->cbb, tblock);
9714 ins->inst_target_bb = tblock;
9715 start_new_bblock = 1;
9717 /* skip the CEE_RET, too */
9718 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9725 inline_costs += 10 * num_calls++;
9728 * Making generic calls out of gsharedvt methods.
9729 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9730 * patching gshared method addresses into a gsharedvt method.
9732 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9733 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9734 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9735 MonoRgctxInfoType info_type;
9738 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9739 //GSHAREDVT_FAILURE (*ip);
9740 // disable for possible remoting calls
9741 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9742 GSHAREDVT_FAILURE (*ip);
9743 if (fsig->generic_param_count) {
9744 /* virtual generic call */
9745 g_assert (!imt_arg);
9746 /* Same as the virtual generic case above */
9747 imt_arg = emit_get_rgctx_method (cfg, context_used,
9748 cmethod, MONO_RGCTX_INFO_METHOD);
9749 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9751 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9752 /* This can happen when we call a fully instantiated iface method */
9753 imt_arg = emit_get_rgctx_method (cfg, context_used,
9754 cmethod, MONO_RGCTX_INFO_METHOD);
9759 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9760 keep_this_alive = sp [0];
9762 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9763 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9765 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9766 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9768 if (cfg->llvm_only) {
9769 // FIXME: Avoid initializing vtable_arg
9770 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9772 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9777 /* Generic sharing */
9780 * Use this if the callee is gsharedvt sharable too, since
9781 * at runtime we might find an instantiation so the call cannot
9782 * be patched (the 'no_patch' code path in mini-trampolines.c).
9784 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9785 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9786 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9787 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9788 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9789 INLINE_FAILURE ("gshared");
9791 g_assert (cfg->gshared && cmethod);
9795 * We are compiling a call to a
9796 * generic method from shared code,
9797 * which means that we have to look up
9798 * the method in the rgctx and do an
9802 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9804 if (cfg->llvm_only) {
9805 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9806 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9808 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9809 // FIXME: Avoid initializing imt_arg/vtable_arg
9810 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9812 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9813 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9818 /* Direct calls to icalls */
9820 MonoMethod *wrapper;
9823 /* Inline the wrapper */
9824 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9826 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9827 g_assert (costs > 0);
9828 cfg->real_offset += 5;
9830 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9831 /* *sp is already set by inline_method */
9836 inline_costs += costs;
9845 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9846 MonoInst *val = sp [fsig->param_count];
9848 if (val->type == STACK_OBJ) {
9849 MonoInst *iargs [2];
9854 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9857 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9858 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9859 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9860 emit_write_barrier (cfg, addr, val);
9861 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9862 GSHAREDVT_FAILURE (*ip);
9863 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9864 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9866 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9867 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9868 if (!cmethod->klass->element_class->valuetype && !readonly)
9869 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9870 CHECK_TYPELOAD (cmethod->klass);
9873 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9876 g_assert_not_reached ();
9883 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9887 /* Tail prefix / tail call optimization */
9889 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9890 /* FIXME: runtime generic context pointer for jumps? */
9891 /* FIXME: handle this for generic sharing eventually */
9892 if ((ins_flag & MONO_INST_TAILCALL) &&
9893 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9894 supported_tail_call = TRUE;
9896 if (supported_tail_call) {
9899 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9900 INLINE_FAILURE ("tail call");
9902 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9904 if (cfg->backend->have_op_tail_call) {
9905 /* Handle tail calls similarly to normal calls */
9908 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9910 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9911 call->tail_call = TRUE;
9912 call->method = cmethod;
9913 call->signature = mono_method_signature (cmethod);
9916 * We implement tail calls by storing the actual arguments into the
9917 * argument variables, then emitting a CEE_JMP.
9919 for (i = 0; i < n; ++i) {
9920 /* Prevent argument from being register allocated */
9921 arg_array [i]->flags |= MONO_INST_VOLATILE;
9922 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9924 ins = (MonoInst*)call;
9925 ins->inst_p0 = cmethod;
9926 ins->inst_p1 = arg_array [0];
9927 MONO_ADD_INS (cfg->cbb, ins);
9928 link_bblock (cfg, cfg->cbb, end_bblock);
9929 start_new_bblock = 1;
9931 // FIXME: Eliminate unreachable epilogs
9934 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9935 * only reachable from this call.
9937 GET_BBLOCK (cfg, tblock, ip + 5);
9938 if (tblock == cfg->cbb || tblock->in_count == 0)
9947 * Synchronized wrappers.
9948 * It's hard to determine where to replace a method with its synchronized
9949 * wrapper without causing an infinite recursion. The current solution is
9950 * to add the synchronized wrapper in the trampolines, and to
9951 * change the called method to a dummy wrapper, and resolve that wrapper
9952 * to the real method in mono_jit_compile_method ().
9954 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9955 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9956 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9957 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9961 * Virtual calls in llvm-only mode.
9963 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9964 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9969 INLINE_FAILURE ("call");
9970 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9971 imt_arg, vtable_arg);
9973 if (tail_call && !cfg->llvm_only) {
9974 link_bblock (cfg, cfg->cbb, end_bblock);
9975 start_new_bblock = 1;
9977 // FIXME: Eliminate unreachable epilogs
9980 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9981 * only reachable from this call.
9983 GET_BBLOCK (cfg, tblock, ip + 5);
9984 if (tblock == cfg->cbb || tblock->in_count == 0)
9991 /* End of call, INS should contain the result of the call, if any */
9993 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9996 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10001 if (keep_this_alive) {
10002 MonoInst *dummy_use;
10004 /* See mono_emit_method_call_full () */
10005 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10008 CHECK_CFG_EXCEPTION;
10012 g_assert (*ip == CEE_RET);
10016 constrained_class = NULL;
10017 if (need_seq_point)
10018 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10022 if (cfg->method != method) {
10023 /* return from inlined method */
10025 * If in_count == 0, that means the ret is unreachable due to
10026 * being preceded by a throw. In that case, inline_method () will
10027 * handle setting the return value
10028 * (test case: test_0_inline_throw ()).
10030 if (return_var && cfg->cbb->in_count) {
10031 MonoType *ret_type = mono_method_signature (method)->ret;
10037 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10040 //g_assert (returnvar != -1);
10041 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10042 cfg->ret_var_set = TRUE;
10045 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10047 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10048 emit_pop_lmf (cfg);
10051 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10053 if (seq_points && !sym_seq_points) {
10055 * Place a seq point here too even though the IL stack is not
10056 * empty, so a step over on
10059 * will work correctly.
10061 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10062 MONO_ADD_INS (cfg->cbb, ins);
10065 g_assert (!return_var);
10069 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10072 emit_setret (cfg, *sp);
10075 if (sp != stack_start)
10077 MONO_INST_NEW (cfg, ins, OP_BR);
10079 ins->inst_target_bb = end_bblock;
10080 MONO_ADD_INS (cfg->cbb, ins);
10081 link_bblock (cfg, cfg->cbb, end_bblock);
10082 start_new_bblock = 1;
10086 MONO_INST_NEW (cfg, ins, OP_BR);
10088 target = ip + 1 + (signed char)(*ip);
10090 GET_BBLOCK (cfg, tblock, target);
10091 link_bblock (cfg, cfg->cbb, tblock);
10092 ins->inst_target_bb = tblock;
10093 if (sp != stack_start) {
10094 handle_stack_args (cfg, stack_start, sp - stack_start);
10096 CHECK_UNVERIFIABLE (cfg);
10098 MONO_ADD_INS (cfg->cbb, ins);
10099 start_new_bblock = 1;
10100 inline_costs += BRANCH_COST;
10114 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10116 target = ip + 1 + *(signed char*)ip;
10119 ADD_BINCOND (NULL);
10122 inline_costs += BRANCH_COST;
10126 MONO_INST_NEW (cfg, ins, OP_BR);
10129 target = ip + 4 + (gint32)read32(ip);
10131 GET_BBLOCK (cfg, tblock, target);
10132 link_bblock (cfg, cfg->cbb, tblock);
10133 ins->inst_target_bb = tblock;
10134 if (sp != stack_start) {
10135 handle_stack_args (cfg, stack_start, sp - stack_start);
10137 CHECK_UNVERIFIABLE (cfg);
10140 MONO_ADD_INS (cfg->cbb, ins);
10142 start_new_bblock = 1;
10143 inline_costs += BRANCH_COST;
10145 case CEE_BRFALSE_S:
10150 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10151 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10152 guint32 opsize = is_short ? 1 : 4;
10154 CHECK_OPSIZE (opsize);
10156 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10159 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10164 GET_BBLOCK (cfg, tblock, target);
10165 link_bblock (cfg, cfg->cbb, tblock);
10166 GET_BBLOCK (cfg, tblock, ip);
10167 link_bblock (cfg, cfg->cbb, tblock);
10169 if (sp != stack_start) {
10170 handle_stack_args (cfg, stack_start, sp - stack_start);
10171 CHECK_UNVERIFIABLE (cfg);
10174 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10175 cmp->sreg1 = sp [0]->dreg;
10176 type_from_op (cfg, cmp, sp [0], NULL);
10179 #if SIZEOF_REGISTER == 4
10180 if (cmp->opcode == OP_LCOMPARE_IMM) {
10181 /* Convert it to OP_LCOMPARE */
10182 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10183 ins->type = STACK_I8;
10184 ins->dreg = alloc_dreg (cfg, STACK_I8);
10186 MONO_ADD_INS (cfg->cbb, ins);
10187 cmp->opcode = OP_LCOMPARE;
10188 cmp->sreg2 = ins->dreg;
10191 MONO_ADD_INS (cfg->cbb, cmp);
10193 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10194 type_from_op (cfg, ins, sp [0], NULL);
10195 MONO_ADD_INS (cfg->cbb, ins);
10196 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10197 GET_BBLOCK (cfg, tblock, target);
10198 ins->inst_true_bb = tblock;
10199 GET_BBLOCK (cfg, tblock, ip);
10200 ins->inst_false_bb = tblock;
10201 start_new_bblock = 2;
10204 inline_costs += BRANCH_COST;
10219 MONO_INST_NEW (cfg, ins, *ip);
10221 target = ip + 4 + (gint32)read32(ip);
10224 ADD_BINCOND (NULL);
10227 inline_costs += BRANCH_COST;
10231 MonoBasicBlock **targets;
10232 MonoBasicBlock *default_bblock;
10233 MonoJumpInfoBBTable *table;
10234 int offset_reg = alloc_preg (cfg);
10235 int target_reg = alloc_preg (cfg);
10236 int table_reg = alloc_preg (cfg);
10237 int sum_reg = alloc_preg (cfg);
10238 gboolean use_op_switch;
10242 n = read32 (ip + 1);
10245 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10249 CHECK_OPSIZE (n * sizeof (guint32));
10250 target = ip + n * sizeof (guint32);
10252 GET_BBLOCK (cfg, default_bblock, target);
10253 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10255 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10256 for (i = 0; i < n; ++i) {
10257 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10258 targets [i] = tblock;
10259 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10263 if (sp != stack_start) {
10265 * Link the current bb with the targets as well, so handle_stack_args
10266 * will set their in_stack correctly.
10268 link_bblock (cfg, cfg->cbb, default_bblock);
10269 for (i = 0; i < n; ++i)
10270 link_bblock (cfg, cfg->cbb, targets [i]);
10272 handle_stack_args (cfg, stack_start, sp - stack_start);
10274 CHECK_UNVERIFIABLE (cfg);
10276 /* Undo the links */
10277 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10278 for (i = 0; i < n; ++i)
10279 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10283 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10285 for (i = 0; i < n; ++i)
10286 link_bblock (cfg, cfg->cbb, targets [i]);
10288 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10289 table->table = targets;
10290 table->table_size = n;
10292 use_op_switch = FALSE;
10294 /* ARM implements SWITCH statements differently */
10295 /* FIXME: Make it use the generic implementation */
10296 if (!cfg->compile_aot)
10297 use_op_switch = TRUE;
10300 if (COMPILE_LLVM (cfg))
10301 use_op_switch = TRUE;
10303 cfg->cbb->has_jump_table = 1;
10305 if (use_op_switch) {
10306 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10307 ins->sreg1 = src1->dreg;
10308 ins->inst_p0 = table;
10309 ins->inst_many_bb = targets;
10310 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10311 MONO_ADD_INS (cfg->cbb, ins);
10313 if (sizeof (gpointer) == 8)
10314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10318 #if SIZEOF_REGISTER == 8
10319 /* The upper word might not be zero, and we add it to a 64 bit address later */
10320 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10323 if (cfg->compile_aot) {
10324 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10326 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10327 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10328 ins->inst_p0 = table;
10329 ins->dreg = table_reg;
10330 MONO_ADD_INS (cfg->cbb, ins);
10333 /* FIXME: Use load_memindex */
10334 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10336 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10338 start_new_bblock = 1;
10339 inline_costs += (BRANCH_COST * 2);
10352 case CEE_LDIND_REF:
10359 dreg = alloc_freg (cfg);
10362 dreg = alloc_lreg (cfg);
10364 case CEE_LDIND_REF:
10365 dreg = alloc_ireg_ref (cfg);
10368 dreg = alloc_preg (cfg);
10371 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10372 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10373 if (*ip == CEE_LDIND_R4)
10374 ins->type = cfg->r4_stack_type;
10375 ins->flags |= ins_flag;
10376 MONO_ADD_INS (cfg->cbb, ins);
10378 if (ins_flag & MONO_INST_VOLATILE) {
10379 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10380 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10385 case CEE_STIND_REF:
10396 if (ins_flag & MONO_INST_VOLATILE) {
10397 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10398 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10401 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10402 ins->flags |= ins_flag;
10405 MONO_ADD_INS (cfg->cbb, ins);
10407 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10408 emit_write_barrier (cfg, sp [0], sp [1]);
10417 MONO_INST_NEW (cfg, ins, (*ip));
10419 ins->sreg1 = sp [0]->dreg;
10420 ins->sreg2 = sp [1]->dreg;
10421 type_from_op (cfg, ins, sp [0], sp [1]);
10423 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10425 /* Use the immediate opcodes if possible */
10426 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10427 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10428 if (imm_opcode != -1) {
10429 ins->opcode = imm_opcode;
10430 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10433 NULLIFY_INS (sp [1]);
10437 MONO_ADD_INS ((cfg)->cbb, (ins));
10439 *sp++ = mono_decompose_opcode (cfg, ins);
10456 MONO_INST_NEW (cfg, ins, (*ip));
10458 ins->sreg1 = sp [0]->dreg;
10459 ins->sreg2 = sp [1]->dreg;
10460 type_from_op (cfg, ins, sp [0], sp [1]);
10462 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10463 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10465 /* FIXME: Pass opcode to is_inst_imm */
10467 /* Use the immediate opcodes if possible */
10468 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10469 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10470 if (imm_opcode != -1) {
10471 ins->opcode = imm_opcode;
10472 if (sp [1]->opcode == OP_I8CONST) {
10473 #if SIZEOF_REGISTER == 8
10474 ins->inst_imm = sp [1]->inst_l;
10476 ins->inst_ls_word = sp [1]->inst_ls_word;
10477 ins->inst_ms_word = sp [1]->inst_ms_word;
10481 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10484 /* Might be followed by an instruction added by add_widen_op */
10485 if (sp [1]->next == NULL)
10486 NULLIFY_INS (sp [1]);
10489 MONO_ADD_INS ((cfg)->cbb, (ins));
10491 *sp++ = mono_decompose_opcode (cfg, ins);
10504 case CEE_CONV_OVF_I8:
10505 case CEE_CONV_OVF_U8:
10506 case CEE_CONV_R_UN:
10509 /* Special case this earlier so we have long constants in the IR */
10510 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10511 int data = sp [-1]->inst_c0;
10512 sp [-1]->opcode = OP_I8CONST;
10513 sp [-1]->type = STACK_I8;
10514 #if SIZEOF_REGISTER == 8
10515 if ((*ip) == CEE_CONV_U8)
10516 sp [-1]->inst_c0 = (guint32)data;
10518 sp [-1]->inst_c0 = data;
10520 sp [-1]->inst_ls_word = data;
10521 if ((*ip) == CEE_CONV_U8)
10522 sp [-1]->inst_ms_word = 0;
10524 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10526 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10533 case CEE_CONV_OVF_I4:
10534 case CEE_CONV_OVF_I1:
10535 case CEE_CONV_OVF_I2:
10536 case CEE_CONV_OVF_I:
10537 case CEE_CONV_OVF_U:
10540 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10541 ADD_UNOP (CEE_CONV_OVF_I8);
10548 case CEE_CONV_OVF_U1:
10549 case CEE_CONV_OVF_U2:
10550 case CEE_CONV_OVF_U4:
10553 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10554 ADD_UNOP (CEE_CONV_OVF_U8);
10561 case CEE_CONV_OVF_I1_UN:
10562 case CEE_CONV_OVF_I2_UN:
10563 case CEE_CONV_OVF_I4_UN:
10564 case CEE_CONV_OVF_I8_UN:
10565 case CEE_CONV_OVF_U1_UN:
10566 case CEE_CONV_OVF_U2_UN:
10567 case CEE_CONV_OVF_U4_UN:
10568 case CEE_CONV_OVF_U8_UN:
10569 case CEE_CONV_OVF_I_UN:
10570 case CEE_CONV_OVF_U_UN:
10577 CHECK_CFG_EXCEPTION;
10581 case CEE_ADD_OVF_UN:
10583 case CEE_MUL_OVF_UN:
10585 case CEE_SUB_OVF_UN:
10591 GSHAREDVT_FAILURE (*ip);
10594 token = read32 (ip + 1);
10595 klass = mini_get_class (method, token, generic_context);
10596 CHECK_TYPELOAD (klass);
10598 if (generic_class_is_reference_type (cfg, klass)) {
10599 MonoInst *store, *load;
10600 int dreg = alloc_ireg_ref (cfg);
10602 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10603 load->flags |= ins_flag;
10604 MONO_ADD_INS (cfg->cbb, load);
10606 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10607 store->flags |= ins_flag;
10608 MONO_ADD_INS (cfg->cbb, store);
10610 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10611 emit_write_barrier (cfg, sp [0], sp [1]);
10613 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10619 int loc_index = -1;
10625 token = read32 (ip + 1);
10626 klass = mini_get_class (method, token, generic_context);
10627 CHECK_TYPELOAD (klass);
10629 /* Optimize the common ldobj+stloc combination */
10632 loc_index = ip [6];
10639 loc_index = ip [5] - CEE_STLOC_0;
10646 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10647 CHECK_LOCAL (loc_index);
10649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10650 ins->dreg = cfg->locals [loc_index]->dreg;
10651 ins->flags |= ins_flag;
10654 if (ins_flag & MONO_INST_VOLATILE) {
10655 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10656 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10662 /* Optimize the ldobj+stobj combination */
10663 /* The reference case ends up being a load+store anyway */
10664 /* Skip this if the operation is volatile. */
10665 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10670 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10677 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10678 ins->flags |= ins_flag;
10681 if (ins_flag & MONO_INST_VOLATILE) {
10682 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10683 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10692 CHECK_STACK_OVF (1);
10694 n = read32 (ip + 1);
10696 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10697 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10698 ins->type = STACK_OBJ;
10701 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10702 MonoInst *iargs [1];
10703 char *str = (char *)mono_method_get_wrapper_data (method, n);
10705 if (cfg->compile_aot)
10706 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10708 EMIT_NEW_PCONST (cfg, iargs [0], str);
10709 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10711 if (cfg->opt & MONO_OPT_SHARED) {
10712 MonoInst *iargs [3];
10714 if (cfg->compile_aot) {
10715 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10717 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10718 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10719 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10720 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10721 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10724 if (cfg->cbb->out_of_line) {
10725 MonoInst *iargs [2];
10727 if (image == mono_defaults.corlib) {
10729 * Avoid relocations in AOT and save some space by using a
10730 * version of helper_ldstr specialized to mscorlib.
10732 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10733 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10735 /* Avoid creating the string object */
10736 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10737 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10738 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10742 if (cfg->compile_aot) {
10743 NEW_LDSTRCONST (cfg, ins, image, n);
10745 MONO_ADD_INS (cfg->cbb, ins);
10748 NEW_PCONST (cfg, ins, NULL);
10749 ins->type = STACK_OBJ;
10750 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10754 OUT_OF_MEMORY_FAILURE;
10757 MONO_ADD_INS (cfg->cbb, ins);
10766 MonoInst *iargs [2];
10767 MonoMethodSignature *fsig;
10770 MonoInst *vtable_arg = NULL;
10773 token = read32 (ip + 1);
10774 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10777 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10780 mono_save_token_info (cfg, image, token, cmethod);
10782 if (!mono_class_init (cmethod->klass))
10783 TYPE_LOAD_ERROR (cmethod->klass);
10785 context_used = mini_method_check_context_used (cfg, cmethod);
10787 if (mono_security_core_clr_enabled ())
10788 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10790 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10791 emit_class_init (cfg, cmethod->klass);
10792 CHECK_TYPELOAD (cmethod->klass);
10796 if (cfg->gsharedvt) {
10797 if (mini_is_gsharedvt_variable_signature (sig))
10798 GSHAREDVT_FAILURE (*ip);
10802 n = fsig->param_count;
10806 * Generate smaller code for the common newobj <exception> instruction in
10807 * argument checking code.
10809 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10810 is_exception_class (cmethod->klass) && n <= 2 &&
10811 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10812 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10813 MonoInst *iargs [3];
10817 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10820 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10823 iargs [1] = sp [0];
10824 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10827 iargs [1] = sp [0];
10828 iargs [2] = sp [1];
10829 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10832 g_assert_not_reached ();
10840 /* move the args to allow room for 'this' in the first position */
10846 /* check_call_signature () requires sp[0] to be set */
10847 this_ins.type = STACK_OBJ;
10848 sp [0] = &this_ins;
10849 if (check_call_signature (cfg, fsig, sp))
10854 if (mini_class_is_system_array (cmethod->klass)) {
10855 *sp = emit_get_rgctx_method (cfg, context_used,
10856 cmethod, MONO_RGCTX_INFO_METHOD);
10858 /* Avoid varargs in the common case */
10859 if (fsig->param_count == 1)
10860 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10861 else if (fsig->param_count == 2)
10862 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10863 else if (fsig->param_count == 3)
10864 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10865 else if (fsig->param_count == 4)
10866 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10868 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10869 } else if (cmethod->string_ctor) {
10870 g_assert (!context_used);
10871 g_assert (!vtable_arg);
10872 /* we simply pass a null pointer */
10873 EMIT_NEW_PCONST (cfg, *sp, NULL);
10874 /* now call the string ctor */
10875 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10877 if (cmethod->klass->valuetype) {
10878 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10879 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10880 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10885 * The code generated by mini_emit_virtual_call () expects
10886 * iargs [0] to be a boxed instance, but luckily the vcall
10887 * will be transformed into a normal call there.
10889 } else if (context_used) {
10890 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10893 MonoVTable *vtable = NULL;
10895 if (!cfg->compile_aot)
10896 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10897 CHECK_TYPELOAD (cmethod->klass);
10900 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10901 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10902 * As a workaround, we call class cctors before allocating objects.
10904 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10905 emit_class_init (cfg, cmethod->klass);
10906 if (cfg->verbose_level > 2)
10907 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10908 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10911 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10914 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10917 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10919 /* Now call the actual ctor */
10920 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10921 CHECK_CFG_EXCEPTION;
10924 if (alloc == NULL) {
10926 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10927 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10935 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10936 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10939 case CEE_CASTCLASS:
10944 token = read32 (ip + 1);
10945 klass = mini_get_class (method, token, generic_context);
10946 CHECK_TYPELOAD (klass);
10947 if (sp [0]->type != STACK_OBJ)
10950 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10951 ins->dreg = alloc_preg (cfg);
10952 ins->sreg1 = (*sp)->dreg;
10953 ins->klass = klass;
10954 ins->type = STACK_OBJ;
10955 MONO_ADD_INS (cfg->cbb, ins);
10957 CHECK_CFG_EXCEPTION;
10961 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10964 case CEE_UNBOX_ANY: {
10965 MonoInst *res, *addr;
10970 token = read32 (ip + 1);
10971 klass = mini_get_class (method, token, generic_context);
10972 CHECK_TYPELOAD (klass);
10974 mono_save_token_info (cfg, image, token, klass);
10976 context_used = mini_class_check_context_used (cfg, klass);
10978 if (mini_is_gsharedvt_klass (klass)) {
10979 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10981 } else if (generic_class_is_reference_type (cfg, klass)) {
10982 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10983 res->dreg = alloc_preg (cfg);
10984 res->sreg1 = (*sp)->dreg;
10985 res->klass = klass;
10986 res->type = STACK_OBJ;
10987 MONO_ADD_INS (cfg->cbb, res);
10988 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10989 } else if (mono_class_is_nullable (klass)) {
10990 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10992 addr = handle_unbox (cfg, klass, sp, context_used);
10994 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11005 MonoClass *enum_class;
11006 MonoMethod *has_flag;
11012 token = read32 (ip + 1);
11013 klass = mini_get_class (method, token, generic_context);
11014 CHECK_TYPELOAD (klass);
11016 mono_save_token_info (cfg, image, token, klass);
11018 context_used = mini_class_check_context_used (cfg, klass);
11020 if (generic_class_is_reference_type (cfg, klass)) {
11026 if (klass == mono_defaults.void_class)
11028 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11030 /* frequent check in generic code: box (struct), brtrue */
11035 * <push int/long ptr>
11038 * constrained. MyFlags
11039 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11041 * If we find this sequence and the operand types on box and constrained
11042 * are equal, we can emit a specialized instruction sequence instead of
11043 * the very slow HasFlag () call.
11045 if ((cfg->opt & MONO_OPT_INTRINS) &&
11046 /* Cheap checks first. */
11047 ip + 5 + 6 + 5 < end &&
11048 ip [5] == CEE_PREFIX1 &&
11049 ip [6] == CEE_CONSTRAINED_ &&
11050 ip [11] == CEE_CALLVIRT &&
11051 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11052 mono_class_is_enum (klass) &&
11053 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11054 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11055 has_flag->klass == mono_defaults.enum_class &&
11056 !strcmp (has_flag->name, "HasFlag") &&
11057 has_flag->signature->hasthis &&
11058 has_flag->signature->param_count == 1) {
11059 CHECK_TYPELOAD (enum_class);
11061 if (enum_class == klass) {
11062 MonoInst *enum_this, *enum_flag;
11067 enum_this = sp [0];
11068 enum_flag = sp [1];
11070 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11075 // FIXME: LLVM can't handle the inconsistent bb linking
11076 if (!mono_class_is_nullable (klass) &&
11077 !mini_is_gsharedvt_klass (klass) &&
11078 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11079 (ip [5] == CEE_BRTRUE ||
11080 ip [5] == CEE_BRTRUE_S ||
11081 ip [5] == CEE_BRFALSE ||
11082 ip [5] == CEE_BRFALSE_S)) {
11083 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11085 MonoBasicBlock *true_bb, *false_bb;
11089 if (cfg->verbose_level > 3) {
11090 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11091 printf ("<box+brtrue opt>\n");
11096 case CEE_BRFALSE_S:
11099 target = ip + 1 + (signed char)(*ip);
11106 target = ip + 4 + (gint)(read32 (ip));
11110 g_assert_not_reached ();
11114 * We need to link both bblocks, since it is needed for handling stack
11115 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11116 * Branching to only one of them would lead to inconsistencies, so
11117 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11119 GET_BBLOCK (cfg, true_bb, target);
11120 GET_BBLOCK (cfg, false_bb, ip);
11122 mono_link_bblock (cfg, cfg->cbb, true_bb);
11123 mono_link_bblock (cfg, cfg->cbb, false_bb);
11125 if (sp != stack_start) {
11126 handle_stack_args (cfg, stack_start, sp - stack_start);
11128 CHECK_UNVERIFIABLE (cfg);
11131 if (COMPILE_LLVM (cfg)) {
11132 dreg = alloc_ireg (cfg);
11133 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11136 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11138 /* The JIT can't eliminate the iconst+compare */
11139 MONO_INST_NEW (cfg, ins, OP_BR);
11140 ins->inst_target_bb = is_true ? true_bb : false_bb;
11141 MONO_ADD_INS (cfg->cbb, ins);
11144 start_new_bblock = 1;
11148 *sp++ = handle_box (cfg, val, klass, context_used);
11150 CHECK_CFG_EXCEPTION;
11159 token = read32 (ip + 1);
11160 klass = mini_get_class (method, token, generic_context);
11161 CHECK_TYPELOAD (klass);
11163 mono_save_token_info (cfg, image, token, klass);
11165 context_used = mini_class_check_context_used (cfg, klass);
11167 if (mono_class_is_nullable (klass)) {
11170 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11171 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11175 ins = handle_unbox (cfg, klass, sp, context_used);
11188 MonoClassField *field;
11189 #ifndef DISABLE_REMOTING
11193 gboolean is_instance;
11195 gpointer addr = NULL;
11196 gboolean is_special_static;
11198 MonoInst *store_val = NULL;
11199 MonoInst *thread_ins;
11202 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11204 if (op == CEE_STFLD) {
11207 store_val = sp [1];
11212 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11214 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11217 if (op == CEE_STSFLD) {
11220 store_val = sp [0];
11225 token = read32 (ip + 1);
11226 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11227 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11228 klass = field->parent;
11231 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11234 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11235 FIELD_ACCESS_FAILURE (method, field);
11236 mono_class_init (klass);
11238 /* if the class is Critical then transparent code cannot access it's fields */
11239 if (!is_instance && mono_security_core_clr_enabled ())
11240 ensure_method_is_allowed_to_access_field (cfg, method, field);
11242 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11243 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11244 if (mono_security_core_clr_enabled ())
11245 ensure_method_is_allowed_to_access_field (cfg, method, field);
11248 ftype = mono_field_get_type (field);
11251 * LDFLD etc. is usable on static fields as well, so convert those cases to
11254 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11266 g_assert_not_reached ();
11268 is_instance = FALSE;
11271 context_used = mini_class_check_context_used (cfg, klass);
11273 /* INSTANCE CASE */
11275 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11276 if (op == CEE_STFLD) {
11277 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11279 #ifndef DISABLE_REMOTING
11280 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11281 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11282 MonoInst *iargs [5];
11284 GSHAREDVT_FAILURE (op);
11286 iargs [0] = sp [0];
11287 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11288 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11289 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11291 iargs [4] = sp [1];
11293 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11294 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11295 iargs, ip, cfg->real_offset, TRUE);
11296 CHECK_CFG_EXCEPTION;
11297 g_assert (costs > 0);
11299 cfg->real_offset += 5;
11301 inline_costs += costs;
11303 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11308 MonoInst *store, *wbarrier_ptr_ins = NULL;
11310 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11312 if (mini_is_gsharedvt_klass (klass)) {
11313 MonoInst *offset_ins;
11315 context_used = mini_class_check_context_used (cfg, klass);
11317 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11318 /* The value is offset by 1 */
11319 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11320 dreg = alloc_ireg_mp (cfg);
11321 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11322 wbarrier_ptr_ins = ins;
11323 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11324 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11326 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11328 if (sp [0]->opcode != OP_LDADDR)
11329 store->flags |= MONO_INST_FAULT;
11331 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11332 if (mini_is_gsharedvt_klass (klass)) {
11333 g_assert (wbarrier_ptr_ins);
11334 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11336 /* insert call to write barrier */
11340 dreg = alloc_ireg_mp (cfg);
11341 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11342 emit_write_barrier (cfg, ptr, sp [1]);
11346 store->flags |= ins_flag;
11353 #ifndef DISABLE_REMOTING
11354 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11355 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11356 MonoInst *iargs [4];
11358 GSHAREDVT_FAILURE (op);
11360 iargs [0] = sp [0];
11361 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11362 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11363 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11364 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11365 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11366 iargs, ip, cfg->real_offset, TRUE);
11367 CHECK_CFG_EXCEPTION;
11368 g_assert (costs > 0);
11370 cfg->real_offset += 5;
11374 inline_costs += costs;
11376 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11382 if (sp [0]->type == STACK_VTYPE) {
11385 /* Have to compute the address of the variable */
11387 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11389 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11391 g_assert (var->klass == klass);
11393 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11397 if (op == CEE_LDFLDA) {
11398 if (sp [0]->type == STACK_OBJ) {
11399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11400 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11403 dreg = alloc_ireg_mp (cfg);
11405 if (mini_is_gsharedvt_klass (klass)) {
11406 MonoInst *offset_ins;
11408 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11409 /* The value is offset by 1 */
11410 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11411 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11413 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11415 ins->klass = mono_class_from_mono_type (field->type);
11416 ins->type = STACK_MP;
11421 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11423 if (mini_is_gsharedvt_klass (klass)) {
11424 MonoInst *offset_ins;
11426 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11427 /* The value is offset by 1 */
11428 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11429 dreg = alloc_ireg_mp (cfg);
11430 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11431 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11433 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11435 load->flags |= ins_flag;
11436 if (sp [0]->opcode != OP_LDADDR)
11437 load->flags |= MONO_INST_FAULT;
11449 context_used = mini_class_check_context_used (cfg, klass);
11451 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11452 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11456 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11457 * to be called here.
11459 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11460 mono_class_vtable (cfg->domain, klass);
11461 CHECK_TYPELOAD (klass);
11463 mono_domain_lock (cfg->domain);
11464 if (cfg->domain->special_static_fields)
11465 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11466 mono_domain_unlock (cfg->domain);
11468 is_special_static = mono_class_field_is_special_static (field);
11470 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11471 thread_ins = mono_get_thread_intrinsic (cfg);
11475 /* Generate IR to compute the field address */
11476 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11478 * Fast access to TLS data
11479 * Inline version of get_thread_static_data () in
11483 int idx, static_data_reg, array_reg, dreg;
11485 GSHAREDVT_FAILURE (op);
11487 MONO_ADD_INS (cfg->cbb, thread_ins);
11488 static_data_reg = alloc_ireg (cfg);
11489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11491 if (cfg->compile_aot) {
11492 int offset_reg, offset2_reg, idx_reg;
11494 /* For TLS variables, this will return the TLS offset */
11495 EMIT_NEW_SFLDACONST (cfg, ins, field);
11496 offset_reg = ins->dreg;
11497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11498 idx_reg = alloc_ireg (cfg);
11499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11501 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11502 array_reg = alloc_ireg (cfg);
11503 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11504 offset2_reg = alloc_ireg (cfg);
11505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11507 dreg = alloc_ireg (cfg);
11508 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11510 offset = (gsize)addr & 0x7fffffff;
11511 idx = offset & 0x3f;
11513 array_reg = alloc_ireg (cfg);
11514 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11515 dreg = alloc_ireg (cfg);
11516 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11518 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11519 (cfg->compile_aot && is_special_static) ||
11520 (context_used && is_special_static)) {
11521 MonoInst *iargs [2];
11523 g_assert (field->parent);
11524 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11525 if (context_used) {
11526 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11527 field, MONO_RGCTX_INFO_CLASS_FIELD);
11529 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11531 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11532 } else if (context_used) {
11533 MonoInst *static_data;
11536 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11537 method->klass->name_space, method->klass->name, method->name,
11538 depth, field->offset);
11541 if (mono_class_needs_cctor_run (klass, method))
11542 emit_class_init (cfg, klass);
11545 * The pointer we're computing here is
11547 * super_info.static_data + field->offset
11549 static_data = emit_get_rgctx_klass (cfg, context_used,
11550 klass, MONO_RGCTX_INFO_STATIC_DATA);
11552 if (mini_is_gsharedvt_klass (klass)) {
11553 MonoInst *offset_ins;
11555 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11556 /* The value is offset by 1 */
11557 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11558 dreg = alloc_ireg_mp (cfg);
11559 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11560 } else if (field->offset == 0) {
11563 int addr_reg = mono_alloc_preg (cfg);
11564 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11566 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11567 MonoInst *iargs [2];
11569 g_assert (field->parent);
11570 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11571 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11572 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11574 MonoVTable *vtable = NULL;
11576 if (!cfg->compile_aot)
11577 vtable = mono_class_vtable (cfg->domain, klass);
11578 CHECK_TYPELOAD (klass);
11581 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11582 if (!(g_slist_find (class_inits, klass))) {
11583 emit_class_init (cfg, klass);
11584 if (cfg->verbose_level > 2)
11585 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11586 class_inits = g_slist_prepend (class_inits, klass);
11589 if (cfg->run_cctors) {
11590 /* This makes so that inline cannot trigger */
11591 /* .cctors: too many apps depend on them */
11592 /* running with a specific order... */
11594 if (! vtable->initialized)
11595 INLINE_FAILURE ("class init");
11596 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11597 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11598 goto exception_exit;
11602 if (cfg->compile_aot)
11603 EMIT_NEW_SFLDACONST (cfg, ins, field);
11606 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11608 EMIT_NEW_PCONST (cfg, ins, addr);
11611 MonoInst *iargs [1];
11612 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11613 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11617 /* Generate IR to do the actual load/store operation */
11619 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11620 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11621 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11624 if (op == CEE_LDSFLDA) {
11625 ins->klass = mono_class_from_mono_type (ftype);
11626 ins->type = STACK_PTR;
11628 } else if (op == CEE_STSFLD) {
11631 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11632 store->flags |= ins_flag;
11634 gboolean is_const = FALSE;
11635 MonoVTable *vtable = NULL;
11636 gpointer addr = NULL;
11638 if (!context_used) {
11639 vtable = mono_class_vtable (cfg->domain, klass);
11640 CHECK_TYPELOAD (klass);
11642 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11643 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11644 int ro_type = ftype->type;
11646 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11647 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11648 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11651 GSHAREDVT_FAILURE (op);
11653 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11656 case MONO_TYPE_BOOLEAN:
11658 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11662 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11665 case MONO_TYPE_CHAR:
11667 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11671 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11676 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11680 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11685 case MONO_TYPE_PTR:
11686 case MONO_TYPE_FNPTR:
11687 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11688 type_to_eval_stack_type ((cfg), field->type, *sp);
11691 case MONO_TYPE_STRING:
11692 case MONO_TYPE_OBJECT:
11693 case MONO_TYPE_CLASS:
11694 case MONO_TYPE_SZARRAY:
11695 case MONO_TYPE_ARRAY:
11696 if (!mono_gc_is_moving ()) {
11697 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11698 type_to_eval_stack_type ((cfg), field->type, *sp);
11706 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11711 case MONO_TYPE_VALUETYPE:
11721 CHECK_STACK_OVF (1);
11723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11724 load->flags |= ins_flag;
11730 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11731 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11732 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11743 token = read32 (ip + 1);
11744 klass = mini_get_class (method, token, generic_context);
11745 CHECK_TYPELOAD (klass);
11746 if (ins_flag & MONO_INST_VOLATILE) {
11747 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11748 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11750 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11751 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11752 ins->flags |= ins_flag;
11753 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11754 generic_class_is_reference_type (cfg, klass)) {
11755 /* insert call to write barrier */
11756 emit_write_barrier (cfg, sp [0], sp [1]);
11768 const char *data_ptr;
11770 guint32 field_token;
11776 token = read32 (ip + 1);
11778 klass = mini_get_class (method, token, generic_context);
11779 CHECK_TYPELOAD (klass);
11781 context_used = mini_class_check_context_used (cfg, klass);
11783 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11784 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11785 ins->sreg1 = sp [0]->dreg;
11786 ins->type = STACK_I4;
11787 ins->dreg = alloc_ireg (cfg);
11788 MONO_ADD_INS (cfg->cbb, ins);
11789 *sp = mono_decompose_opcode (cfg, ins);
11792 if (context_used) {
11793 MonoInst *args [3];
11794 MonoClass *array_class = mono_array_class_get (klass, 1);
11795 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11797 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11800 args [0] = emit_get_rgctx_klass (cfg, context_used,
11801 array_class, MONO_RGCTX_INFO_VTABLE);
11806 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11808 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11810 if (cfg->opt & MONO_OPT_SHARED) {
11811 /* Decompose now to avoid problems with references to the domainvar */
11812 MonoInst *iargs [3];
11814 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11815 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11816 iargs [2] = sp [0];
11818 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11820 /* Decompose later since it is needed by abcrem */
11821 MonoClass *array_type = mono_array_class_get (klass, 1);
11822 mono_class_vtable (cfg->domain, array_type);
11823 CHECK_TYPELOAD (array_type);
11825 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11826 ins->dreg = alloc_ireg_ref (cfg);
11827 ins->sreg1 = sp [0]->dreg;
11828 ins->inst_newa_class = klass;
11829 ins->type = STACK_OBJ;
11830 ins->klass = array_type;
11831 MONO_ADD_INS (cfg->cbb, ins);
11832 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11833 cfg->cbb->has_array_access = TRUE;
11835 /* Needed so mono_emit_load_get_addr () gets called */
11836 mono_get_got_var (cfg);
11846 * we inline/optimize the initialization sequence if possible.
11847 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11848 * for small sizes open code the memcpy
11849 * ensure the rva field is big enough
11851 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11852 MonoMethod *memcpy_method = get_memcpy_method ();
11853 MonoInst *iargs [3];
11854 int add_reg = alloc_ireg_mp (cfg);
11856 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11857 if (cfg->compile_aot) {
11858 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11860 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11862 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11863 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11872 if (sp [0]->type != STACK_OBJ)
11875 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11876 ins->dreg = alloc_preg (cfg);
11877 ins->sreg1 = sp [0]->dreg;
11878 ins->type = STACK_I4;
11879 /* This flag will be inherited by the decomposition */
11880 ins->flags |= MONO_INST_FAULT;
11881 MONO_ADD_INS (cfg->cbb, ins);
11882 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11883 cfg->cbb->has_array_access = TRUE;
11891 if (sp [0]->type != STACK_OBJ)
11894 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11896 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11897 CHECK_TYPELOAD (klass);
11898 /* we need to make sure that this array is exactly the type it needs
11899 * to be for correctness. the wrappers are lax with their usage
11900 * so we need to ignore them here
11902 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11903 MonoClass *array_class = mono_array_class_get (klass, 1);
11904 mini_emit_check_array_type (cfg, sp [0], array_class);
11905 CHECK_TYPELOAD (array_class);
11909 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11914 case CEE_LDELEM_I1:
11915 case CEE_LDELEM_U1:
11916 case CEE_LDELEM_I2:
11917 case CEE_LDELEM_U2:
11918 case CEE_LDELEM_I4:
11919 case CEE_LDELEM_U4:
11920 case CEE_LDELEM_I8:
11922 case CEE_LDELEM_R4:
11923 case CEE_LDELEM_R8:
11924 case CEE_LDELEM_REF: {
11930 if (*ip == CEE_LDELEM) {
11932 token = read32 (ip + 1);
11933 klass = mini_get_class (method, token, generic_context);
11934 CHECK_TYPELOAD (klass);
11935 mono_class_init (klass);
11938 klass = array_access_to_klass (*ip);
11940 if (sp [0]->type != STACK_OBJ)
11943 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11945 if (mini_is_gsharedvt_variable_klass (klass)) {
11946 // FIXME-VT: OP_ICONST optimization
11947 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11948 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11949 ins->opcode = OP_LOADV_MEMBASE;
11950 } else if (sp [1]->opcode == OP_ICONST) {
11951 int array_reg = sp [0]->dreg;
11952 int index_reg = sp [1]->dreg;
11953 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11955 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11956 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11958 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11959 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11961 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11962 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11965 if (*ip == CEE_LDELEM)
11972 case CEE_STELEM_I1:
11973 case CEE_STELEM_I2:
11974 case CEE_STELEM_I4:
11975 case CEE_STELEM_I8:
11976 case CEE_STELEM_R4:
11977 case CEE_STELEM_R8:
11978 case CEE_STELEM_REF:
11983 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11985 if (*ip == CEE_STELEM) {
11987 token = read32 (ip + 1);
11988 klass = mini_get_class (method, token, generic_context);
11989 CHECK_TYPELOAD (klass);
11990 mono_class_init (klass);
11993 klass = array_access_to_klass (*ip);
11995 if (sp [0]->type != STACK_OBJ)
11998 emit_array_store (cfg, klass, sp, TRUE);
12000 if (*ip == CEE_STELEM)
12007 case CEE_CKFINITE: {
12011 if (cfg->llvm_only) {
12012 MonoInst *iargs [1];
12014 iargs [0] = sp [0];
12015 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12017 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12018 ins->sreg1 = sp [0]->dreg;
12019 ins->dreg = alloc_freg (cfg);
12020 ins->type = STACK_R8;
12021 MONO_ADD_INS (cfg->cbb, ins);
12023 *sp++ = mono_decompose_opcode (cfg, ins);
12029 case CEE_REFANYVAL: {
12030 MonoInst *src_var, *src;
12032 int klass_reg = alloc_preg (cfg);
12033 int dreg = alloc_preg (cfg);
12035 GSHAREDVT_FAILURE (*ip);
12038 MONO_INST_NEW (cfg, ins, *ip);
12041 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12042 CHECK_TYPELOAD (klass);
12044 context_used = mini_class_check_context_used (cfg, klass);
12047 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12049 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12050 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12053 if (context_used) {
12054 MonoInst *klass_ins;
12056 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12057 klass, MONO_RGCTX_INFO_KLASS);
12060 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12061 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12063 mini_emit_class_check (cfg, klass_reg, klass);
12065 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12066 ins->type = STACK_MP;
12067 ins->klass = klass;
12072 case CEE_MKREFANY: {
12073 MonoInst *loc, *addr;
12075 GSHAREDVT_FAILURE (*ip);
12078 MONO_INST_NEW (cfg, ins, *ip);
12081 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12082 CHECK_TYPELOAD (klass);
12084 context_used = mini_class_check_context_used (cfg, klass);
12086 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12087 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12089 if (context_used) {
12090 MonoInst *const_ins;
12091 int type_reg = alloc_preg (cfg);
12093 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12094 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12096 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12097 } else if (cfg->compile_aot) {
12098 int const_reg = alloc_preg (cfg);
12099 int type_reg = alloc_preg (cfg);
12101 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12102 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12103 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12106 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12107 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12111 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12112 ins->type = STACK_VTYPE;
12113 ins->klass = mono_defaults.typed_reference_class;
12118 case CEE_LDTOKEN: {
12120 MonoClass *handle_class;
12122 CHECK_STACK_OVF (1);
12125 n = read32 (ip + 1);
12127 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12128 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12129 handle = mono_method_get_wrapper_data (method, n);
12130 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12131 if (handle_class == mono_defaults.typehandle_class)
12132 handle = &((MonoClass*)handle)->byval_arg;
12135 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12140 mono_class_init (handle_class);
12141 if (cfg->gshared) {
12142 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12143 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12144 /* This case handles ldtoken
12145 of an open type, like for
12148 } else if (handle_class == mono_defaults.typehandle_class) {
12149 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12150 } else if (handle_class == mono_defaults.fieldhandle_class)
12151 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12152 else if (handle_class == mono_defaults.methodhandle_class)
12153 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12155 g_assert_not_reached ();
12158 if ((cfg->opt & MONO_OPT_SHARED) &&
12159 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12160 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12161 MonoInst *addr, *vtvar, *iargs [3];
12162 int method_context_used;
12164 method_context_used = mini_method_check_context_used (cfg, method);
12166 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12168 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12169 EMIT_NEW_ICONST (cfg, iargs [1], n);
12170 if (method_context_used) {
12171 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12172 method, MONO_RGCTX_INFO_METHOD);
12173 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12175 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12176 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12178 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12182 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12184 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12185 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12186 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12187 (cmethod->klass == mono_defaults.systemtype_class) &&
12188 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12189 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12191 mono_class_init (tclass);
12192 if (context_used) {
12193 ins = emit_get_rgctx_klass (cfg, context_used,
12194 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12195 } else if (cfg->compile_aot) {
12196 if (method->wrapper_type) {
12197 mono_error_init (&error); //got to do it since there are multiple conditionals below
12198 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12199 /* Special case for static synchronized wrappers */
12200 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12202 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12203 /* FIXME: n is not a normal token */
12205 EMIT_NEW_PCONST (cfg, ins, NULL);
12208 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12211 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12213 EMIT_NEW_PCONST (cfg, ins, rt);
12215 ins->type = STACK_OBJ;
12216 ins->klass = cmethod->klass;
12219 MonoInst *addr, *vtvar;
12221 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12223 if (context_used) {
12224 if (handle_class == mono_defaults.typehandle_class) {
12225 ins = emit_get_rgctx_klass (cfg, context_used,
12226 mono_class_from_mono_type ((MonoType *)handle),
12227 MONO_RGCTX_INFO_TYPE);
12228 } else if (handle_class == mono_defaults.methodhandle_class) {
12229 ins = emit_get_rgctx_method (cfg, context_used,
12230 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12231 } else if (handle_class == mono_defaults.fieldhandle_class) {
12232 ins = emit_get_rgctx_field (cfg, context_used,
12233 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12235 g_assert_not_reached ();
12237 } else if (cfg->compile_aot) {
12238 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12240 EMIT_NEW_PCONST (cfg, ins, handle);
12242 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12244 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12254 MONO_INST_NEW (cfg, ins, OP_THROW);
12256 ins->sreg1 = sp [0]->dreg;
12258 cfg->cbb->out_of_line = TRUE;
12259 MONO_ADD_INS (cfg->cbb, ins);
12260 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12261 MONO_ADD_INS (cfg->cbb, ins);
12264 link_bblock (cfg, cfg->cbb, end_bblock);
12265 start_new_bblock = 1;
12266 /* This can complicate code generation for llvm since the return value might not be defined */
12267 if (COMPILE_LLVM (cfg))
12268 INLINE_FAILURE ("throw");
12270 case CEE_ENDFINALLY:
12271 /* mono_save_seq_point_info () depends on this */
12272 if (sp != stack_start)
12273 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12274 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12275 MONO_ADD_INS (cfg->cbb, ins);
12277 start_new_bblock = 1;
12280 * Control will leave the method so empty the stack, otherwise
12281 * the next basic block will start with a nonempty stack.
12283 while (sp != stack_start) {
12288 case CEE_LEAVE_S: {
12291 if (*ip == CEE_LEAVE) {
12293 target = ip + 5 + (gint32)read32(ip + 1);
12296 target = ip + 2 + (signed char)(ip [1]);
12299 /* empty the stack */
12300 while (sp != stack_start) {
12305 * If this leave statement is in a catch block, check for a
12306 * pending exception, and rethrow it if necessary.
12307 * We avoid doing this in runtime invoke wrappers, since those are called
12308 * by native code which excepts the wrapper to catch all exceptions.
12310 for (i = 0; i < header->num_clauses; ++i) {
12311 MonoExceptionClause *clause = &header->clauses [i];
12314 * Use <= in the final comparison to handle clauses with multiple
12315 * leave statements, like in bug #78024.
12316 * The ordering of the exception clauses guarantees that we find the
12317 * innermost clause.
12319 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12321 MonoBasicBlock *dont_throw;
12326 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12329 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12331 NEW_BBLOCK (cfg, dont_throw);
12334 * Currently, we always rethrow the abort exception, despite the
12335 * fact that this is not correct. See thread6.cs for an example.
12336 * But propagating the abort exception is more important than
12337 * getting the sematics right.
12339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12341 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12343 MONO_START_BB (cfg, dont_throw);
12348 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12351 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12353 MonoExceptionClause *clause;
12355 for (tmp = handlers; tmp; tmp = tmp->next) {
12356 clause = (MonoExceptionClause *)tmp->data;
12357 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12359 link_bblock (cfg, cfg->cbb, tblock);
12360 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12361 ins->inst_target_bb = tblock;
12362 ins->inst_eh_block = clause;
12363 MONO_ADD_INS (cfg->cbb, ins);
12364 cfg->cbb->has_call_handler = 1;
12365 if (COMPILE_LLVM (cfg)) {
12366 MonoBasicBlock *target_bb;
12369 * Link the finally bblock with the target, since it will
12370 * conceptually branch there.
12372 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12373 GET_BBLOCK (cfg, target_bb, target);
12374 link_bblock (cfg, tblock, target_bb);
12377 g_list_free (handlers);
12380 MONO_INST_NEW (cfg, ins, OP_BR);
12381 MONO_ADD_INS (cfg->cbb, ins);
12382 GET_BBLOCK (cfg, tblock, target);
12383 link_bblock (cfg, cfg->cbb, tblock);
12384 ins->inst_target_bb = tblock;
12386 start_new_bblock = 1;
12388 if (*ip == CEE_LEAVE)
12397 * Mono specific opcodes
12399 case MONO_CUSTOM_PREFIX: {
12401 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12405 case CEE_MONO_ICALL: {
12407 MonoJitICallInfo *info;
12409 token = read32 (ip + 2);
12410 func = mono_method_get_wrapper_data (method, token);
12411 info = mono_find_jit_icall_by_addr (func);
12413 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12416 CHECK_STACK (info->sig->param_count);
12417 sp -= info->sig->param_count;
12419 ins = mono_emit_jit_icall (cfg, info->func, sp);
12420 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12424 inline_costs += 10 * num_calls++;
12428 case CEE_MONO_LDPTR_CARD_TABLE:
12429 case CEE_MONO_LDPTR_NURSERY_START:
12430 case CEE_MONO_LDPTR_NURSERY_BITS:
12431 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12432 CHECK_STACK_OVF (1);
12435 case CEE_MONO_LDPTR_CARD_TABLE:
12436 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12438 case CEE_MONO_LDPTR_NURSERY_START:
12439 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12441 case CEE_MONO_LDPTR_NURSERY_BITS:
12442 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12444 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12445 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12451 inline_costs += 10 * num_calls++;
12454 case CEE_MONO_LDPTR: {
12457 CHECK_STACK_OVF (1);
12459 token = read32 (ip + 2);
12461 ptr = mono_method_get_wrapper_data (method, token);
12462 EMIT_NEW_PCONST (cfg, ins, ptr);
12465 inline_costs += 10 * num_calls++;
12466 /* Can't embed random pointers into AOT code */
12470 case CEE_MONO_JIT_ICALL_ADDR: {
12471 MonoJitICallInfo *callinfo;
12474 CHECK_STACK_OVF (1);
12476 token = read32 (ip + 2);
12478 ptr = mono_method_get_wrapper_data (method, token);
12479 callinfo = mono_find_jit_icall_by_addr (ptr);
12480 g_assert (callinfo);
12481 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12484 inline_costs += 10 * num_calls++;
12487 case CEE_MONO_ICALL_ADDR: {
12488 MonoMethod *cmethod;
12491 CHECK_STACK_OVF (1);
12493 token = read32 (ip + 2);
12495 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12497 if (cfg->compile_aot) {
12498 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12500 ptr = mono_lookup_internal_call (cmethod);
12502 EMIT_NEW_PCONST (cfg, ins, ptr);
12508 case CEE_MONO_VTADDR: {
12509 MonoInst *src_var, *src;
12515 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12516 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12521 case CEE_MONO_NEWOBJ: {
12522 MonoInst *iargs [2];
12524 CHECK_STACK_OVF (1);
12526 token = read32 (ip + 2);
12527 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12528 mono_class_init (klass);
12529 NEW_DOMAINCONST (cfg, iargs [0]);
12530 MONO_ADD_INS (cfg->cbb, iargs [0]);
12531 NEW_CLASSCONST (cfg, iargs [1], klass);
12532 MONO_ADD_INS (cfg->cbb, iargs [1]);
12533 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12535 inline_costs += 10 * num_calls++;
12538 case CEE_MONO_OBJADDR:
12541 MONO_INST_NEW (cfg, ins, OP_MOVE);
12542 ins->dreg = alloc_ireg_mp (cfg);
12543 ins->sreg1 = sp [0]->dreg;
12544 ins->type = STACK_MP;
12545 MONO_ADD_INS (cfg->cbb, ins);
12549 case CEE_MONO_LDNATIVEOBJ:
12551 * Similar to LDOBJ, but instead load the unmanaged
12552 * representation of the vtype to the stack.
12557 token = read32 (ip + 2);
12558 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12559 g_assert (klass->valuetype);
12560 mono_class_init (klass);
12563 MonoInst *src, *dest, *temp;
12566 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12567 temp->backend.is_pinvoke = 1;
12568 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12569 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12571 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12572 dest->type = STACK_VTYPE;
12573 dest->klass = klass;
12579 case CEE_MONO_RETOBJ: {
12581 * Same as RET, but return the native representation of a vtype
12584 g_assert (cfg->ret);
12585 g_assert (mono_method_signature (method)->pinvoke);
12590 token = read32 (ip + 2);
12591 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12593 if (!cfg->vret_addr) {
12594 g_assert (cfg->ret_var_is_local);
12596 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12598 EMIT_NEW_RETLOADA (cfg, ins);
12600 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12602 if (sp != stack_start)
12605 MONO_INST_NEW (cfg, ins, OP_BR);
12606 ins->inst_target_bb = end_bblock;
12607 MONO_ADD_INS (cfg->cbb, ins);
12608 link_bblock (cfg, cfg->cbb, end_bblock);
12609 start_new_bblock = 1;
12613 case CEE_MONO_CISINST:
12614 case CEE_MONO_CCASTCLASS: {
12619 token = read32 (ip + 2);
12620 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12621 if (ip [1] == CEE_MONO_CISINST)
12622 ins = handle_cisinst (cfg, klass, sp [0]);
12624 ins = handle_ccastclass (cfg, klass, sp [0]);
12629 case CEE_MONO_SAVE_LMF:
12630 case CEE_MONO_RESTORE_LMF:
12633 case CEE_MONO_CLASSCONST:
12634 CHECK_STACK_OVF (1);
12636 token = read32 (ip + 2);
12637 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12640 inline_costs += 10 * num_calls++;
12642 case CEE_MONO_NOT_TAKEN:
12643 cfg->cbb->out_of_line = TRUE;
12646 case CEE_MONO_TLS: {
12649 CHECK_STACK_OVF (1);
12651 key = (MonoTlsKey)read32 (ip + 2);
12652 g_assert (key < TLS_KEY_NUM);
12654 ins = mono_create_tls_get (cfg, key);
12656 if (cfg->compile_aot) {
12658 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12659 ins->dreg = alloc_preg (cfg);
12660 ins->type = STACK_PTR;
12662 g_assert_not_reached ();
12665 ins->type = STACK_PTR;
12666 MONO_ADD_INS (cfg->cbb, ins);
12671 case CEE_MONO_DYN_CALL: {
12672 MonoCallInst *call;
12674 /* It would be easier to call a trampoline, but that would put an
12675 * extra frame on the stack, confusing exception handling. So
12676 * implement it inline using an opcode for now.
12679 if (!cfg->dyn_call_var) {
12680 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12681 /* prevent it from being register allocated */
12682 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12685 /* Has to use a call inst since it local regalloc expects it */
12686 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12687 ins = (MonoInst*)call;
12689 ins->sreg1 = sp [0]->dreg;
12690 ins->sreg2 = sp [1]->dreg;
12691 MONO_ADD_INS (cfg->cbb, ins);
12693 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12696 inline_costs += 10 * num_calls++;
12700 case CEE_MONO_MEMORY_BARRIER: {
12702 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12706 case CEE_MONO_ATOMIC_STORE_I4: {
12707 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12713 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12714 ins->dreg = sp [0]->dreg;
12715 ins->sreg1 = sp [1]->dreg;
12716 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12717 MONO_ADD_INS (cfg->cbb, ins);
12722 case CEE_MONO_JIT_ATTACH: {
12723 MonoInst *args [16], *domain_ins;
12724 MonoInst *ad_ins, *jit_tls_ins;
12725 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12727 g_assert (!mono_threads_is_coop_enabled ());
12729 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12731 EMIT_NEW_PCONST (cfg, ins, NULL);
12732 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12734 ad_ins = mono_get_domain_intrinsic (cfg);
12735 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12737 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12738 NEW_BBLOCK (cfg, next_bb);
12739 NEW_BBLOCK (cfg, call_bb);
12741 if (cfg->compile_aot) {
12742 /* AOT code is only used in the root domain */
12743 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12745 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12747 MONO_ADD_INS (cfg->cbb, ad_ins);
12748 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12751 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12756 MONO_START_BB (cfg, call_bb);
12759 /* AOT code is only used in the root domain */
12760 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12761 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12762 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12765 MONO_START_BB (cfg, next_bb);
12771 case CEE_MONO_JIT_DETACH: {
12772 MonoInst *args [16];
12774 /* Restore the original domain */
12775 dreg = alloc_ireg (cfg);
12776 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12777 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12781 case CEE_MONO_CALLI_EXTRA_ARG: {
12783 MonoMethodSignature *fsig;
12787 * This is the same as CEE_CALLI, but passes an additional argument
12788 * to the called method in llvmonly mode.
12789 * This is only used by delegate invoke wrappers to call the
12790 * actual delegate method.
12792 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12795 token = read32 (ip + 2);
12803 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12806 if (cfg->llvm_only)
12807 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12809 n = fsig->param_count + fsig->hasthis + 1;
12816 if (cfg->llvm_only) {
12818 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12819 * cconv. This is set by mono_init_delegate ().
12821 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12822 MonoInst *callee = addr;
12823 MonoInst *call, *localloc_ins;
12824 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12825 int low_bit_reg = alloc_preg (cfg);
12827 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12828 NEW_BBLOCK (cfg, end_bb);
12830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12834 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12835 addr = emit_get_rgctx_sig (cfg, context_used,
12836 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12838 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12840 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12841 ins->dreg = alloc_preg (cfg);
12842 ins->inst_imm = 2 * SIZEOF_VOID_P;
12843 MONO_ADD_INS (cfg->cbb, ins);
12844 localloc_ins = ins;
12845 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12849 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12852 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12853 MONO_START_BB (cfg, is_gsharedvt_bb);
12854 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12855 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12856 ins->dreg = call->dreg;
12858 MONO_START_BB (cfg, end_bb);
12860 /* Caller uses a normal calling conv */
12862 MonoInst *callee = addr;
12863 MonoInst *call, *localloc_ins;
12864 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12865 int low_bit_reg = alloc_preg (cfg);
12867 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12868 NEW_BBLOCK (cfg, end_bb);
12870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12872 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12874 /* Normal case: callee uses a normal cconv, no conversion is needed */
12875 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12877 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12878 MONO_START_BB (cfg, is_gsharedvt_bb);
12879 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12880 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12881 MONO_ADD_INS (cfg->cbb, addr);
12883 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12885 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12886 ins->dreg = alloc_preg (cfg);
12887 ins->inst_imm = 2 * SIZEOF_VOID_P;
12888 MONO_ADD_INS (cfg->cbb, ins);
12889 localloc_ins = ins;
12890 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12894 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12895 ins->dreg = call->dreg;
12896 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12898 MONO_START_BB (cfg, end_bb);
12901 /* Same as CEE_CALLI */
12902 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12904 * We pass the address to the gsharedvt trampoline in the rgctx reg
12906 MonoInst *callee = addr;
12908 addr = emit_get_rgctx_sig (cfg, context_used,
12909 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12910 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12912 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12916 if (!MONO_TYPE_IS_VOID (fsig->ret))
12917 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12919 CHECK_CFG_EXCEPTION;
12923 constrained_class = NULL;
12926 case CEE_MONO_LDDOMAIN:
12927 CHECK_STACK_OVF (1);
12928 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12933 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12939 case CEE_PREFIX1: {
12942 case CEE_ARGLIST: {
12943 /* somewhat similar to LDTOKEN */
12944 MonoInst *addr, *vtvar;
12945 CHECK_STACK_OVF (1);
12946 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12948 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12949 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12951 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12952 ins->type = STACK_VTYPE;
12953 ins->klass = mono_defaults.argumenthandle_class;
12963 MonoInst *cmp, *arg1, *arg2;
12971 * The following transforms:
12972 * CEE_CEQ into OP_CEQ
12973 * CEE_CGT into OP_CGT
12974 * CEE_CGT_UN into OP_CGT_UN
12975 * CEE_CLT into OP_CLT
12976 * CEE_CLT_UN into OP_CLT_UN
12978 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12980 MONO_INST_NEW (cfg, ins, cmp->opcode);
12981 cmp->sreg1 = arg1->dreg;
12982 cmp->sreg2 = arg2->dreg;
12983 type_from_op (cfg, cmp, arg1, arg2);
12985 add_widen_op (cfg, cmp, &arg1, &arg2);
12986 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12987 cmp->opcode = OP_LCOMPARE;
12988 else if (arg1->type == STACK_R4)
12989 cmp->opcode = OP_RCOMPARE;
12990 else if (arg1->type == STACK_R8)
12991 cmp->opcode = OP_FCOMPARE;
12993 cmp->opcode = OP_ICOMPARE;
12994 MONO_ADD_INS (cfg->cbb, cmp);
12995 ins->type = STACK_I4;
12996 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12997 type_from_op (cfg, ins, arg1, arg2);
12999 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13001 * The backends expect the fceq opcodes to do the
13004 ins->sreg1 = cmp->sreg1;
13005 ins->sreg2 = cmp->sreg2;
13008 MONO_ADD_INS (cfg->cbb, ins);
13014 MonoInst *argconst;
13015 MonoMethod *cil_method;
13017 CHECK_STACK_OVF (1);
13019 n = read32 (ip + 2);
13020 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13023 mono_class_init (cmethod->klass);
13025 mono_save_token_info (cfg, image, n, cmethod);
13027 context_used = mini_method_check_context_used (cfg, cmethod);
13029 cil_method = cmethod;
13030 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13031 emit_method_access_failure (cfg, method, cil_method);
13033 if (mono_security_core_clr_enabled ())
13034 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13037 * Optimize the common case of ldftn+delegate creation
13039 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13040 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13041 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13042 MonoInst *target_ins, *handle_ins;
13043 MonoMethod *invoke;
13044 int invoke_context_used;
13046 invoke = mono_get_delegate_invoke (ctor_method->klass);
13047 if (!invoke || !mono_method_signature (invoke))
13050 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13052 target_ins = sp [-1];
13054 if (mono_security_core_clr_enabled ())
13055 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13057 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13058 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13059 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13061 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13065 /* FIXME: SGEN support */
13066 if (invoke_context_used == 0 || cfg->llvm_only) {
13068 if (cfg->verbose_level > 3)
13069 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13070 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13073 CHECK_CFG_EXCEPTION;
13083 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13084 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13088 inline_costs += 10 * num_calls++;
13091 case CEE_LDVIRTFTN: {
13092 MonoInst *args [2];
13096 n = read32 (ip + 2);
13097 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13100 mono_class_init (cmethod->klass);
13102 context_used = mini_method_check_context_used (cfg, cmethod);
13104 if (mono_security_core_clr_enabled ())
13105 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13108 * Optimize the common case of ldvirtftn+delegate creation
13110 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13111 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13112 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13113 MonoInst *target_ins, *handle_ins;
13114 MonoMethod *invoke;
13115 int invoke_context_used;
13116 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13118 invoke = mono_get_delegate_invoke (ctor_method->klass);
13119 if (!invoke || !mono_method_signature (invoke))
13122 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13124 target_ins = sp [-1];
13126 if (mono_security_core_clr_enabled ())
13127 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13129 /* FIXME: SGEN support */
13130 if (invoke_context_used == 0 || cfg->llvm_only) {
13132 if (cfg->verbose_level > 3)
13133 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13134 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13137 CHECK_CFG_EXCEPTION;
13150 args [1] = emit_get_rgctx_method (cfg, context_used,
13151 cmethod, MONO_RGCTX_INFO_METHOD);
13154 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13156 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13159 inline_costs += 10 * num_calls++;
13163 CHECK_STACK_OVF (1);
13165 n = read16 (ip + 2);
13167 EMIT_NEW_ARGLOAD (cfg, ins, n);
13172 CHECK_STACK_OVF (1);
13174 n = read16 (ip + 2);
13176 NEW_ARGLOADA (cfg, ins, n);
13177 MONO_ADD_INS (cfg->cbb, ins);
13185 n = read16 (ip + 2);
13187 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13189 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13193 CHECK_STACK_OVF (1);
13195 n = read16 (ip + 2);
13197 EMIT_NEW_LOCLOAD (cfg, ins, n);
13202 unsigned char *tmp_ip;
13203 CHECK_STACK_OVF (1);
13205 n = read16 (ip + 2);
13208 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13214 EMIT_NEW_LOCLOADA (cfg, ins, n);
13223 n = read16 (ip + 2);
13225 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13227 emit_stloc_ir (cfg, sp, header, n);
13234 if (sp != stack_start)
13236 if (cfg->method != method)
13238 * Inlining this into a loop in a parent could lead to
13239 * stack overflows which is different behavior than the
13240 * non-inlined case, thus disable inlining in this case.
13242 INLINE_FAILURE("localloc");
13244 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13245 ins->dreg = alloc_preg (cfg);
13246 ins->sreg1 = sp [0]->dreg;
13247 ins->type = STACK_PTR;
13248 MONO_ADD_INS (cfg->cbb, ins);
13250 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13252 ins->flags |= MONO_INST_INIT;
13257 case CEE_ENDFILTER: {
13258 MonoExceptionClause *clause, *nearest;
13263 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13265 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13266 ins->sreg1 = (*sp)->dreg;
13267 MONO_ADD_INS (cfg->cbb, ins);
13268 start_new_bblock = 1;
13272 for (cc = 0; cc < header->num_clauses; ++cc) {
13273 clause = &header->clauses [cc];
13274 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13275 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13276 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13279 g_assert (nearest);
13280 if ((ip - header->code) != nearest->handler_offset)
13285 case CEE_UNALIGNED_:
13286 ins_flag |= MONO_INST_UNALIGNED;
13287 /* FIXME: record alignment? we can assume 1 for now */
13291 case CEE_VOLATILE_:
13292 ins_flag |= MONO_INST_VOLATILE;
13296 ins_flag |= MONO_INST_TAILCALL;
13297 cfg->flags |= MONO_CFG_HAS_TAIL;
13298 /* Can't inline tail calls at this time */
13299 inline_costs += 100000;
13306 token = read32 (ip + 2);
13307 klass = mini_get_class (method, token, generic_context);
13308 CHECK_TYPELOAD (klass);
13309 if (generic_class_is_reference_type (cfg, klass))
13310 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13312 mini_emit_initobj (cfg, *sp, NULL, klass);
13316 case CEE_CONSTRAINED_:
13318 token = read32 (ip + 2);
13319 constrained_class = mini_get_class (method, token, generic_context);
13320 CHECK_TYPELOAD (constrained_class);
13324 case CEE_INITBLK: {
13325 MonoInst *iargs [3];
13329 /* Skip optimized paths for volatile operations. */
13330 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13331 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13332 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13333 /* emit_memset only works when val == 0 */
13334 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13337 iargs [0] = sp [0];
13338 iargs [1] = sp [1];
13339 iargs [2] = sp [2];
13340 if (ip [1] == CEE_CPBLK) {
13342 * FIXME: It's unclear whether we should be emitting both the acquire
13343 * and release barriers for cpblk. It is technically both a load and
13344 * store operation, so it seems like that's the sensible thing to do.
13346 * FIXME: We emit full barriers on both sides of the operation for
13347 * simplicity. We should have a separate atomic memcpy method instead.
13349 MonoMethod *memcpy_method = get_memcpy_method ();
13351 if (ins_flag & MONO_INST_VOLATILE)
13352 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13354 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13355 call->flags |= ins_flag;
13357 if (ins_flag & MONO_INST_VOLATILE)
13358 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13360 MonoMethod *memset_method = get_memset_method ();
13361 if (ins_flag & MONO_INST_VOLATILE) {
13362 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13363 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13365 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13366 call->flags |= ins_flag;
13377 ins_flag |= MONO_INST_NOTYPECHECK;
13379 ins_flag |= MONO_INST_NORANGECHECK;
13380 /* we ignore the no-nullcheck for now since we
13381 * really do it explicitly only when doing callvirt->call
13385 case CEE_RETHROW: {
13387 int handler_offset = -1;
13389 for (i = 0; i < header->num_clauses; ++i) {
13390 MonoExceptionClause *clause = &header->clauses [i];
13391 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13392 handler_offset = clause->handler_offset;
13397 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13399 if (handler_offset == -1)
13402 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13403 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13404 ins->sreg1 = load->dreg;
13405 MONO_ADD_INS (cfg->cbb, ins);
13407 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13408 MONO_ADD_INS (cfg->cbb, ins);
13411 link_bblock (cfg, cfg->cbb, end_bblock);
13412 start_new_bblock = 1;
13420 CHECK_STACK_OVF (1);
13422 token = read32 (ip + 2);
13423 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13424 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13427 val = mono_type_size (type, &ialign);
13429 MonoClass *klass = mini_get_class (method, token, generic_context);
13430 CHECK_TYPELOAD (klass);
13432 val = mono_type_size (&klass->byval_arg, &ialign);
13434 if (mini_is_gsharedvt_klass (klass))
13435 GSHAREDVT_FAILURE (*ip);
13437 EMIT_NEW_ICONST (cfg, ins, val);
13442 case CEE_REFANYTYPE: {
13443 MonoInst *src_var, *src;
13445 GSHAREDVT_FAILURE (*ip);
13451 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13453 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13454 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13455 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13460 case CEE_READONLY_:
13473 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13483 g_warning ("opcode 0x%02x not handled", *ip);
13487 if (start_new_bblock != 1)
13490 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13491 if (cfg->cbb->next_bb) {
13492 /* This could already be set because of inlining, #693905 */
13493 MonoBasicBlock *bb = cfg->cbb;
13495 while (bb->next_bb)
13497 bb->next_bb = end_bblock;
13499 cfg->cbb->next_bb = end_bblock;
13502 if (cfg->method == method && cfg->domainvar) {
13504 MonoInst *get_domain;
13506 cfg->cbb = init_localsbb;
13508 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13509 MONO_ADD_INS (cfg->cbb, get_domain);
13511 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13513 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13514 MONO_ADD_INS (cfg->cbb, store);
13517 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13518 if (cfg->compile_aot)
13519 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13520 mono_get_got_var (cfg);
13523 if (cfg->method == method && cfg->got_var)
13524 mono_emit_load_got_addr (cfg);
13526 if (init_localsbb) {
13527 cfg->cbb = init_localsbb;
13529 for (i = 0; i < header->num_locals; ++i) {
13530 emit_init_local (cfg, i, header->locals [i], init_locals);
13534 if (cfg->init_ref_vars && cfg->method == method) {
13535 /* Emit initialization for ref vars */
13536 // FIXME: Avoid duplication initialization for IL locals.
13537 for (i = 0; i < cfg->num_varinfo; ++i) {
13538 MonoInst *ins = cfg->varinfo [i];
13540 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13541 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13545 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13546 cfg->cbb = init_localsbb;
13547 emit_push_lmf (cfg);
13550 cfg->cbb = init_localsbb;
13551 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13554 MonoBasicBlock *bb;
13557 * Make seq points at backward branch targets interruptable.
13559 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13560 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13561 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13564 /* Add a sequence point for method entry/exit events */
13565 if (seq_points && cfg->gen_sdb_seq_points) {
13566 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13567 MONO_ADD_INS (init_localsbb, ins);
13568 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13569 MONO_ADD_INS (cfg->bb_exit, ins);
13573 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13574 * the code they refer to was dead (#11880).
13576 if (sym_seq_points) {
13577 for (i = 0; i < header->code_size; ++i) {
13578 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13581 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13582 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13589 if (cfg->method == method) {
13590 MonoBasicBlock *bb;
13591 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13592 bb->region = mono_find_block_region (cfg, bb->real_offset);
13594 mono_create_spvar_for_region (cfg, bb->region);
13595 if (cfg->verbose_level > 2)
13596 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13599 MonoBasicBlock *bb;
13600 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13601 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13602 bb->real_offset = inline_offset;
13606 if (inline_costs < 0) {
13609 /* Method is too large */
13610 mname = mono_method_full_name (method, TRUE);
13611 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13615 if ((cfg->verbose_level > 2) && (cfg->method == method))
13616 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13621 g_assert (!mono_error_ok (&cfg->error));
13625 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13629 set_exception_type_from_invalid_il (cfg, method, ip);
13633 g_slist_free (class_inits);
13634 mono_basic_block_free (original_bb);
13635 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13636 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13637 if (cfg->exception_type)
13640 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode to the store-immediate
 * membase opcode of the same operand width (REG -> IMM form).
 * Asserts on any opcode without an immediate variant.
 *
 * NOTE(review): the declaration specifier, switch header, default label
 * and closing braces are elided from this excerpt; only the case arms
 * are visible here.
 */
13644 store_membase_reg_to_store_membase_imm (int opcode)
13647 case OP_STORE_MEMBASE_REG:
13648 return OP_STORE_MEMBASE_IMM;
13649 case OP_STOREI1_MEMBASE_REG:
13650 return OP_STOREI1_MEMBASE_IMM;
13651 case OP_STOREI2_MEMBASE_REG:
13652 return OP_STOREI2_MEMBASE_IMM;
13653 case OP_STOREI4_MEMBASE_REG:
13654 return OP_STOREI4_MEMBASE_IMM;
13655 case OP_STOREI8_MEMBASE_REG:
13656 return OP_STOREI8_MEMBASE_IMM;
/* Callers are expected to pass only *_MEMBASE_REG store opcodes. */
13658 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to the variant taking an
 * immediate operand (e.g. an IR add -> OP_IADD_IMM). Covers 32 bit and
 * 64 bit ALU/shift ops, compares, membase stores, and some x86/amd64
 * specific opcodes.
 *
 * NOTE(review): most case labels and the switch/brace structure are
 * elided from this excerpt; the source opcode of each arm cannot be
 * confirmed from the visible lines alone.
 */
13665 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU and shift ops. */
13669 return OP_IADD_IMM;
13671 return OP_ISUB_IMM;
13673 return OP_IDIV_IMM;
13675 return OP_IDIV_UN_IMM;
13677 return OP_IREM_IMM;
13679 return OP_IREM_UN_IMM;
13681 return OP_IMUL_IMM;
13683 return OP_IAND_IMM;
13687 return OP_IXOR_IMM;
13689 return OP_ISHL_IMM;
13691 return OP_ISHR_IMM;
13693 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU and shift ops. */
13696 return OP_LADD_IMM;
13698 return OP_LSUB_IMM;
13700 return OP_LAND_IMM;
13704 return OP_LXOR_IMM;
13706 return OP_LSHL_IMM;
13708 return OP_LSHR_IMM;
13710 return OP_LSHR_UN_IMM;
/* The long-rem immediate form is only usable on 64 bit registers. */
13711 #if SIZEOF_REGISTER == 8
13713 return OP_LREM_IMM;
/* Compares. */
13717 return OP_COMPARE_IMM;
13719 return OP_ICOMPARE_IMM;
13721 return OP_LCOMPARE_IMM;
/* Membase stores: same mapping as store_membase_reg_to_store_membase_imm. */
13723 case OP_STORE_MEMBASE_REG:
13724 return OP_STORE_MEMBASE_IMM;
13725 case OP_STOREI1_MEMBASE_REG:
13726 return OP_STOREI1_MEMBASE_IMM;
13727 case OP_STOREI2_MEMBASE_REG:
13728 return OP_STOREI2_MEMBASE_IMM;
13729 case OP_STOREI4_MEMBASE_REG:
13730 return OP_STOREI4_MEMBASE_IMM;
/* Architecture specific opcodes. */
13732 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13734 return OP_X86_PUSH_IMM;
13735 case OP_X86_COMPARE_MEMBASE_REG:
13736 return OP_X86_COMPARE_MEMBASE_IMM;
13738 #if defined(TARGET_AMD64)
13739 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13740 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13742 case OP_VOIDCALL_REG:
13743 return OP_VOIDCALL;
13751 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL ldind.* opcode to the OP_LOAD*_MEMBASE opcode of the same
 * width and signedness. Asserts on anything that is not an ldind opcode.
 *
 * NOTE(review): most case labels (CEE_LDIND_I1 etc.) and the switch
 * structure are elided from this excerpt; only CEE_LDIND_REF is visible.
 */
13758 ldind_to_load_membase (int opcode)
13762 return OP_LOADI1_MEMBASE;
13764 return OP_LOADU1_MEMBASE;
13766 return OP_LOADI2_MEMBASE;
13768 return OP_LOADU2_MEMBASE;
13770 return OP_LOADI4_MEMBASE;
13772 return OP_LOADU4_MEMBASE;
13774 return OP_LOAD_MEMBASE;
/* Object references load as native-word sized values. */
13775 case CEE_LDIND_REF:
13776 return OP_LOAD_MEMBASE;
13778 return OP_LOADI8_MEMBASE;
13780 return OP_LOADR4_MEMBASE;
13782 return OP_LOADR8_MEMBASE;
13784 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL stind.* opcode to the OP_STORE*_MEMBASE_REG opcode of the
 * same width. Asserts on anything that is not an stind opcode.
 *
 * NOTE(review): most case labels and the switch structure are elided
 * from this excerpt; only CEE_STIND_REF is visible.
 */
13791 stind_to_store_membase (int opcode)
13795 return OP_STOREI1_MEMBASE_REG;
13797 return OP_STOREI2_MEMBASE_REG;
13799 return OP_STOREI4_MEMBASE_REG;
/* Object references store as native-word sized values. */
13801 case CEE_STIND_REF:
13802 return OP_STORE_MEMBASE_REG;
13804 return OP_STOREI8_MEMBASE_REG;
13806 return OP_STORER4_MEMBASE_REG;
13808 return OP_STORER8_MEMBASE_REG;
13810 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) load opcode to the
 * absolute-address OP_LOAD*_MEM form. Only x86/amd64 provide these
 * opcodes; presumably the (elided) fall-through path returns -1 on
 * other targets -- TODO confirm against the full source.
 */
13817 mono_load_membase_to_load_mem (int opcode)
13819 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13820 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13822 case OP_LOAD_MEMBASE:
13823 return OP_LOAD_MEM;
13824 case OP_LOADU1_MEMBASE:
13825 return OP_LOADU1_MEM;
13826 case OP_LOADU2_MEMBASE:
13827 return OP_LOADU2_MEM;
13828 case OP_LOADI4_MEMBASE:
13829 return OP_LOADI4_MEM;
13830 case OP_LOADU4_MEMBASE:
13831 return OP_LOADU4_MEM;
/* The 8 byte absolute load only exists on 64 bit registers. */
13832 #if SIZEOF_REGISTER == 8
13833 case OP_LOADI8_MEMBASE:
13834 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode used to spill a variable and the ALU opcode
 * whose destination is that variable, return an x86/amd64
 * read-modify-write *_MEMBASE opcode which fuses the ALU operation with
 * the store (e.g. add reg -> add [base+offset], reg). Presumably the
 * (elided) fall-through returns -1 when no fusion is possible -- TODO
 * confirm against the full source.
 *
 * NOTE(review): most case labels and the switch structure are elided
 * from this excerpt.
 */
13843 op_to_op_dest_membase (int store_opcode, int opcode)
13845 #if defined(TARGET_X86)
/* On x86 only native-word / 4 byte stores can be fused. */
13846 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13851 return OP_X86_ADD_MEMBASE_REG;
13853 return OP_X86_SUB_MEMBASE_REG;
13855 return OP_X86_AND_MEMBASE_REG;
13857 return OP_X86_OR_MEMBASE_REG;
13859 return OP_X86_XOR_MEMBASE_REG;
13862 return OP_X86_ADD_MEMBASE_IMM;
13865 return OP_X86_SUB_MEMBASE_IMM;
13868 return OP_X86_AND_MEMBASE_IMM;
13871 return OP_X86_OR_MEMBASE_IMM;
13874 return OP_X86_XOR_MEMBASE_IMM;
13880 #if defined(TARGET_AMD64)
/* On amd64, 8 byte stores can be fused as well. */
13881 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13886 return OP_X86_ADD_MEMBASE_REG;
13888 return OP_X86_SUB_MEMBASE_REG;
13890 return OP_X86_AND_MEMBASE_REG;
13892 return OP_X86_OR_MEMBASE_REG;
13894 return OP_X86_XOR_MEMBASE_REG;
13896 return OP_X86_ADD_MEMBASE_IMM;
13898 return OP_X86_SUB_MEMBASE_IMM;
13900 return OP_X86_AND_MEMBASE_IMM;
13902 return OP_X86_OR_MEMBASE_IMM;
13904 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit wide variants. */
13906 return OP_AMD64_ADD_MEMBASE_REG;
13908 return OP_AMD64_SUB_MEMBASE_REG;
13910 return OP_AMD64_AND_MEMBASE_REG;
13912 return OP_AMD64_OR_MEMBASE_REG;
13914 return OP_AMD64_XOR_MEMBASE_REG;
13917 return OP_AMD64_ADD_MEMBASE_IMM;
13920 return OP_AMD64_SUB_MEMBASE_IMM;
13923 return OP_AMD64_AND_MEMBASE_IMM;
13926 return OP_AMD64_OR_MEMBASE_IMM;
13929 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a set-on-condition opcode whose one-byte result is being
 * stored into a single x86 SET*_MEMBASE opcode. The (elided) case
 * labels appear to be the eq/ne compare-set opcodes; -1 is presumably
 * returned when no fusion applies -- TODO confirm against the full
 * source.
 */
13939 op_to_op_store_membase (int store_opcode, int opcode)
13941 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* setcc produces a single byte, so only byte stores can be fused. */
13944 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13945 return OP_X86_SETEQ_MEMBASE;
13947 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13948 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding the FIRST source operand of OPCODE into a
 * single x86/amd64 *_MEMBASE opcode (e.g. cmp reg, x -> cmp
 * [base+offset], x). Presumably the (elided) fall-through returns -1
 * when no fusion is possible -- TODO confirm against the full source.
 *
 * NOTE(review): several case labels and the switch/brace structure are
 * elided from this excerpt.
 */
13956 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13959 /* FIXME: This has sign extension issues */
13961 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13962 return OP_X86_COMPARE_MEMBASE8_IMM;
/* On x86, only native-word / 4 byte loads can be folded. */
13965 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13970 return OP_X86_PUSH_MEMBASE;
13971 case OP_COMPARE_IMM:
13972 case OP_ICOMPARE_IMM:
13973 return OP_X86_COMPARE_MEMBASE_IMM;
13976 return OP_X86_COMPARE_MEMBASE_REG;
13980 #ifdef TARGET_AMD64
13981 /* FIXME: This has sign extension issues */
13983 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13984 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) OP_LOAD_MEMBASE is a 4 byte load, not 8. */
13989 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13990 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the source (32 bit immediate limitation). */
13992 /* FIXME: This only works for 32 bit immediates
13993 case OP_COMPARE_IMM:
13994 case OP_LCOMPARE_IMM:
13995 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13996 return OP_AMD64_COMPARE_MEMBASE_IMM;
13998 case OP_ICOMPARE_IMM:
13999 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14000 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
14004 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14005 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14006 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14007 return OP_AMD64_COMPARE_MEMBASE_REG;
14010 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14011 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding the SECOND source operand of OPCODE into a
 * single x86/amd64 REG_MEMBASE opcode (e.g. add x, reg -> add x,
 * [base+offset]). Presumably the (elided) fall-through returns -1 when
 * no fusion is possible -- TODO confirm against the full source.
 *
 * NOTE(review): several case labels and the switch/brace structure are
 * elided from this excerpt.
 */
14020 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* On x86, only native-word / 4 byte loads can be folded. */
14023 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14029 return OP_X86_COMPARE_REG_MEMBASE;
14031 return OP_X86_ADD_REG_MEMBASE;
14033 return OP_X86_SUB_REG_MEMBASE;
14035 return OP_X86_AND_REG_MEMBASE;
14037 return OP_X86_OR_REG_MEMBASE;
14039 return OP_X86_XOR_REG_MEMBASE;
14043 #ifdef TARGET_AMD64
/* 4 byte loads (or native-word loads under ILP32) use the 32 bit forms. */
14044 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14047 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14049 return OP_X86_ADD_REG_MEMBASE;
14051 return OP_X86_SUB_REG_MEMBASE;
14053 return OP_X86_AND_REG_MEMBASE;
14055 return OP_X86_OR_REG_MEMBASE;
14057 return OP_X86_XOR_REG_MEMBASE;
/* 8 byte loads use the 64 bit wide forms. */
14059 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14063 return OP_AMD64_COMPARE_REG_MEMBASE;
14065 return OP_AMD64_ADD_REG_MEMBASE;
14067 return OP_AMD64_SUB_REG_MEMBASE;
14069 return OP_AMD64_AND_REG_MEMBASE;
14071 return OP_AMD64_OR_REG_MEMBASE;
14073 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but the (elided) case arms guarded by the
 * emulation #ifdefs appear to opt out of the immediate form for opcodes
 * that are emulated in software on this target -- TODO confirm the
 * elided arms return -1.
 */
14082 mono_op_to_op_imm_noemul (int opcode)
/* Long shifts emulated on 32 bit targets without native support. */
14085 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* Division/remainder emulated on some targets. */
14091 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Multiplication emulated on some targets. */
14098 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can safely take the immediate form. */
14103 return mono_op_to_op_imm (opcode);
14108 * mono_handle_global_vregs:
14110 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them, and conversely demote single-bblock
 * variables to local vregs. Finally compress the varinfo/vars tables.
 *
 * FIX(review): the use-scan below tested the SRC1/sreg1 pair twice in
 * the same disjunction (a copy/paste duplicate); the second disjunct
 * now checks SRC2/sreg2 so uses through the second source register are
 * also detected.
 */
14114 mono_handle_global_vregs (MonoCompile *cfg)
14116 gint32 *vreg_to_bb;
14117 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, N+1 = seen only in block N, -1 = seen in multiple blocks. */
14120 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14122 #ifdef MONO_ARCH_SIMD_INTRINSICS
14123 if (cfg->uses_simd_intrinsics)
14124 mono_simd_simplify_indirection (cfg);
14127 /* Find local vregs used in more than one bb */
14128 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14129 MonoInst *ins = bb->code;
14130 int block_num = bb->block_num;
14132 if (cfg->verbose_level > 2)
14133 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14136 for (; ins; ins = ins->next) {
14137 const char *spec = INS_INFO (ins->opcode);
14138 int regtype = 0, regindex;
14141 if (G_UNLIKELY (cfg->verbose_level > 2))
14142 mono_print_ins (ins);
/* CIL opcodes must have been lowered to IR opcodes by this point. */
14144 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 in turn; ' ' in the spec means unused. */
14146 for (regindex = 0; regindex < 4; regindex ++) {
14149 if (regindex == 0) {
14150 regtype = spec [MONO_INST_DEST];
14151 if (regtype == ' ')
14154 } else if (regindex == 1) {
14155 regtype = spec [MONO_INST_SRC1];
14156 if (regtype == ' ')
14159 } else if (regindex == 2) {
14160 regtype = spec [MONO_INST_SRC2];
14161 if (regtype == ' ')
14164 } else if (regindex == 3) {
14165 regtype = spec [MONO_INST_SRC3];
14166 if (regtype == ' ')
14171 #if SIZEOF_REGISTER == 4
14172 /* In the LLVM case, the long opcodes are not decomposed */
14173 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14175 * Since some instructions reference the original long vreg,
14176 * and some reference the two component vregs, it is quite hard
14177 * to determine when it needs to be global. So be conservative.
14179 if (!get_vreg_to_inst (cfg, vreg)) {
14180 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14182 if (cfg->verbose_level > 2)
14183 printf ("LONG VREG R%d made global.\n", vreg);
14187 * Make the component vregs volatile since the optimizations can
14188 * get confused otherwise.
14190 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14191 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14195 g_assert (vreg != -1);
14197 prev_bb = vreg_to_bb [vreg];
14198 if (prev_bb == 0) {
14199 /* 0 is a valid block num */
14200 vreg_to_bb [vreg] = block_num + 1;
14201 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are never made global. */
14202 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14205 if (!get_vreg_to_inst (cfg, vreg)) {
14206 if (G_UNLIKELY (cfg->verbose_level > 2))
14207 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the managed type for the new variable from the reg type. */
14211 if (vreg_is_ref (cfg, vreg))
14212 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14214 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14217 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14220 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14223 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14226 g_assert_not_reached ();
14230 /* Flag as having been used in more than one bb */
14231 vreg_to_bb [vreg] = -1;
14237 /* If a variable is used in only one bblock, convert it into a local vreg */
14238 for (i = 0; i < cfg->num_varinfo; i++) {
14239 MonoInst *var = cfg->varinfo [i];
14240 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14242 switch (var->type) {
14248 #if SIZEOF_REGISTER == 8
14251 #if !defined(TARGET_X86)
14252 /* Enabling this screws up the fp stack on x86 */
14255 if (mono_arch_is_soft_float ())
14259 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14263 /* Arguments are implicitly global */
14264 /* Putting R4 vars into registers doesn't work currently */
14265 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14266 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14268 * Make that the variable's liveness interval doesn't contain a call, since
14269 * that would cause the lvreg to be spilled, making the whole optimization
14272 /* This is too slow for JIT compilation */
14274 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14276 int def_index, call_index, ins_index;
14277 gboolean spilled = FALSE;
14282 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14283 const char *spec = INS_INFO (ins->opcode);
14285 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14286 def_index = ins_index;
/* A use after a call means the lvreg would be spilled across the call. */
14288 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14289 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
14290 if (call_index > def_index) {
14296 if (MONO_IS_CALL (ins))
14297 call_index = ins_index;
14307 if (G_UNLIKELY (cfg->verbose_level > 2))
14308 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14309 var->flags |= MONO_INST_IS_DEAD;
14310 cfg->vreg_to_inst [var->dreg] = NULL;
14317 * Compress the varinfo and vars tables so the liveness computation is faster and
14318 * takes up less space.
14321 for (i = 0; i < cfg->num_varinfo; ++i) {
14322 MonoInst *var = cfg->varinfo [i];
14323 if (pos < i && cfg->locals_start == i)
14324 cfg->locals_start = pos;
14325 if (!(var->flags & MONO_INST_IS_DEAD)) {
14327 cfg->varinfo [pos] = cfg->varinfo [i];
14328 cfg->varinfo [pos]->inst_c0 = pos;
14329 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14330 cfg->vars [pos].idx = pos;
14331 #if SIZEOF_REGISTER == 4
14332 if (cfg->varinfo [pos]->type == STACK_I8) {
14333 /* Modify the two component vars too */
14336 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14337 var1->inst_c0 = pos;
14338 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14339 var1->inst_c0 = pos;
14346 cfg->num_varinfo = pos;
14347 if (cfg->locals_start > cfg->num_varinfo)
14348 cfg->locals_start = cfg->num_varinfo;
14352 * mono_allocate_gsharedvt_vars:
14354 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14355 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * mono_allocate_gsharedvt_vars:
 *
 *   For every gsharedvt-typed variable, record its runtime-info slot in
 * cfg->gsharedvt_vreg_to_idx (slot index + 1 for locals, -1 for
 * arguments, which live at a register offset instead) and rewrite the
 * variable's opcode accordingly.
 */
14358 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14362 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14364 for (i = 0; i < cfg->num_varinfo; ++i) {
14365 MonoInst *ins = cfg->varinfo [i];
14368 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a slot in MonoGSharedVtMethodRuntimeInfo.entries. */
14369 if (i >= cfg->locals_start) {
14371 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* Store idx + 1 so 0 can mean "no slot". */
14372 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14373 ins->opcode = OP_GSHAREDVT_LOCAL;
14374 ins->inst_imm = idx;
/* Arguments are marked with -1 and addressed via a register offset. */
14377 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14378 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14385 * mono_spill_global_vars:
14387 * Generate spill code for variables which are not allocated to registers,
14388 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14389 * code is generated which could be optimized by the local optimization passes.
14392 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14394 MonoBasicBlock *bb;
14396 int orig_next_vreg;
14397 guint32 *vreg_to_lvreg;
14399 guint32 i, lvregs_len;
14400 gboolean dest_has_lvreg = FALSE;
14401 MonoStackType stacktypes [128];
14402 MonoInst **live_range_start, **live_range_end;
14403 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14405 *need_local_opts = FALSE;
14407 memset (spec2, 0, sizeof (spec2));
14409 /* FIXME: Move this function to mini.c */
14410 stacktypes ['i'] = STACK_PTR;
14411 stacktypes ['l'] = STACK_I8;
14412 stacktypes ['f'] = STACK_R8;
14413 #ifdef MONO_ARCH_SIMD_INTRINSICS
14414 stacktypes ['x'] = STACK_VTYPE;
14417 #if SIZEOF_REGISTER == 4
14418 /* Create MonoInsts for longs */
14419 for (i = 0; i < cfg->num_varinfo; i++) {
14420 MonoInst *ins = cfg->varinfo [i];
14422 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14423 switch (ins->type) {
14428 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14431 g_assert (ins->opcode == OP_REGOFFSET);
14433 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14435 tree->opcode = OP_REGOFFSET;
14436 tree->inst_basereg = ins->inst_basereg;
14437 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14439 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14441 tree->opcode = OP_REGOFFSET;
14442 tree->inst_basereg = ins->inst_basereg;
14443 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14453 if (cfg->compute_gc_maps) {
14454 /* registers need liveness info even for !non refs */
14455 for (i = 0; i < cfg->num_varinfo; i++) {
14456 MonoInst *ins = cfg->varinfo [i];
14458 if (ins->opcode == OP_REGVAR)
14459 ins->flags |= MONO_INST_GC_TRACK;
14463 /* FIXME: widening and truncation */
14466 * As an optimization, when a variable allocated to the stack is first loaded into
14467 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14468 * the variable again.
14470 orig_next_vreg = cfg->next_vreg;
14471 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14472 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14476 * These arrays contain the first and last instructions accessing a given
14478 * Since we emit bblocks in the same order we process them here, and we
14479 * don't split live ranges, these will precisely describe the live range of
14480 * the variable, i.e. the instruction range where a valid value can be found
14481 * in the variables location.
14482 * The live range is computed using the liveness info computed by the liveness pass.
14483 * We can't use vmv->range, since that is an abstract live range, and we need
14484 * one which is instruction precise.
14485 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14487 /* FIXME: Only do this if debugging info is requested */
14488 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14489 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14490 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14491 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14493 /* Add spill loads/stores */
14494 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14497 if (cfg->verbose_level > 2)
14498 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14500 /* Clear vreg_to_lvreg array */
14501 for (i = 0; i < lvregs_len; i++)
14502 vreg_to_lvreg [lvregs [i]] = 0;
14506 MONO_BB_FOR_EACH_INS (bb, ins) {
14507 const char *spec = INS_INFO (ins->opcode);
14508 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14509 gboolean store, no_lvreg;
14510 int sregs [MONO_MAX_SRC_REGS];
14512 if (G_UNLIKELY (cfg->verbose_level > 2))
14513 mono_print_ins (ins);
14515 if (ins->opcode == OP_NOP)
14519 * We handle LDADDR here as well, since it can only be decomposed
14520 * when variable addresses are known.
14522 if (ins->opcode == OP_LDADDR) {
14523 MonoInst *var = (MonoInst *)ins->inst_p0;
14525 if (var->opcode == OP_VTARG_ADDR) {
14526 /* Happens on SPARC/S390 where vtypes are passed by reference */
14527 MonoInst *vtaddr = var->inst_left;
14528 if (vtaddr->opcode == OP_REGVAR) {
14529 ins->opcode = OP_MOVE;
14530 ins->sreg1 = vtaddr->dreg;
14532 else if (var->inst_left->opcode == OP_REGOFFSET) {
14533 ins->opcode = OP_LOAD_MEMBASE;
14534 ins->inst_basereg = vtaddr->inst_basereg;
14535 ins->inst_offset = vtaddr->inst_offset;
14538 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14539 /* gsharedvt arg passed by ref */
14540 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14542 ins->opcode = OP_LOAD_MEMBASE;
14543 ins->inst_basereg = var->inst_basereg;
14544 ins->inst_offset = var->inst_offset;
14545 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14546 MonoInst *load, *load2, *load3;
14547 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14548 int reg1, reg2, reg3;
14549 MonoInst *info_var = cfg->gsharedvt_info_var;
14550 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14554 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14557 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14559 g_assert (info_var);
14560 g_assert (locals_var);
14562 /* Mark the instruction used to compute the locals var as used */
14563 cfg->gsharedvt_locals_var_ins = NULL;
14565 /* Load the offset */
14566 if (info_var->opcode == OP_REGOFFSET) {
14567 reg1 = alloc_ireg (cfg);
14568 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14569 } else if (info_var->opcode == OP_REGVAR) {
14571 reg1 = info_var->dreg;
14573 g_assert_not_reached ();
14575 reg2 = alloc_ireg (cfg);
14576 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14577 /* Load the locals area address */
14578 reg3 = alloc_ireg (cfg);
14579 if (locals_var->opcode == OP_REGOFFSET) {
14580 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14581 } else if (locals_var->opcode == OP_REGVAR) {
14582 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14584 g_assert_not_reached ();
14586 /* Compute the address */
14587 ins->opcode = OP_PADD;
14591 mono_bblock_insert_before_ins (bb, ins, load3);
14592 mono_bblock_insert_before_ins (bb, load3, load2);
14594 mono_bblock_insert_before_ins (bb, load2, load);
14596 g_assert (var->opcode == OP_REGOFFSET);
14598 ins->opcode = OP_ADD_IMM;
14599 ins->sreg1 = var->inst_basereg;
14600 ins->inst_imm = var->inst_offset;
14603 *need_local_opts = TRUE;
14604 spec = INS_INFO (ins->opcode);
14607 if (ins->opcode < MONO_CEE_LAST) {
14608 mono_print_ins (ins);
14609 g_assert_not_reached ();
14613 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14617 if (MONO_IS_STORE_MEMBASE (ins)) {
14618 tmp_reg = ins->dreg;
14619 ins->dreg = ins->sreg2;
14620 ins->sreg2 = tmp_reg;
14623 spec2 [MONO_INST_DEST] = ' ';
14624 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14625 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14626 spec2 [MONO_INST_SRC3] = ' ';
14628 } else if (MONO_IS_STORE_MEMINDEX (ins))
14629 g_assert_not_reached ();
14634 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14635 printf ("\t %.3s %d", spec, ins->dreg);
14636 num_sregs = mono_inst_get_src_registers (ins, sregs);
14637 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14638 printf (" %d", sregs [srcindex]);
14645 regtype = spec [MONO_INST_DEST];
14646 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14649 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14650 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14651 MonoInst *store_ins;
14653 MonoInst *def_ins = ins;
14654 int dreg = ins->dreg; /* The original vreg */
14656 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14658 if (var->opcode == OP_REGVAR) {
14659 ins->dreg = var->dreg;
14660 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14662 * Instead of emitting a load+store, use a _membase opcode.
14664 g_assert (var->opcode == OP_REGOFFSET);
14665 if (ins->opcode == OP_MOVE) {
14669 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14670 ins->inst_basereg = var->inst_basereg;
14671 ins->inst_offset = var->inst_offset;
14674 spec = INS_INFO (ins->opcode);
14678 g_assert (var->opcode == OP_REGOFFSET);
14680 prev_dreg = ins->dreg;
14682 /* Invalidate any previous lvreg for this vreg */
14683 vreg_to_lvreg [ins->dreg] = 0;
14687 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14689 store_opcode = OP_STOREI8_MEMBASE_REG;
14692 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14694 #if SIZEOF_REGISTER != 8
14695 if (regtype == 'l') {
14696 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14697 mono_bblock_insert_after_ins (bb, ins, store_ins);
14698 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14699 mono_bblock_insert_after_ins (bb, ins, store_ins);
14700 def_ins = store_ins;
14705 g_assert (store_opcode != OP_STOREV_MEMBASE);
14707 /* Try to fuse the store into the instruction itself */
14708 /* FIXME: Add more instructions */
14709 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14710 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14711 ins->inst_imm = ins->inst_c0;
14712 ins->inst_destbasereg = var->inst_basereg;
14713 ins->inst_offset = var->inst_offset;
14714 spec = INS_INFO (ins->opcode);
14715 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14716 ins->opcode = store_opcode;
14717 ins->inst_destbasereg = var->inst_basereg;
14718 ins->inst_offset = var->inst_offset;
14722 tmp_reg = ins->dreg;
14723 ins->dreg = ins->sreg2;
14724 ins->sreg2 = tmp_reg;
14727 spec2 [MONO_INST_DEST] = ' ';
14728 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14729 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14730 spec2 [MONO_INST_SRC3] = ' ';
14732 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14733 // FIXME: The backends expect the base reg to be in inst_basereg
14734 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14736 ins->inst_basereg = var->inst_basereg;
14737 ins->inst_offset = var->inst_offset;
14738 spec = INS_INFO (ins->opcode);
14740 /* printf ("INS: "); mono_print_ins (ins); */
14741 /* Create a store instruction */
14742 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14744 /* Insert it after the instruction */
14745 mono_bblock_insert_after_ins (bb, ins, store_ins);
14747 def_ins = store_ins;
14750 * We can't assign ins->dreg to var->dreg here, since the
14751 * sregs could use it. So set a flag, and do it after
14754 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14755 dest_has_lvreg = TRUE;
14760 if (def_ins && !live_range_start [dreg]) {
14761 live_range_start [dreg] = def_ins;
14762 live_range_start_bb [dreg] = bb;
14765 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14768 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14769 tmp->inst_c1 = dreg;
14770 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14777 num_sregs = mono_inst_get_src_registers (ins, sregs);
14778 for (srcindex = 0; srcindex < 3; ++srcindex) {
14779 regtype = spec [MONO_INST_SRC1 + srcindex];
14780 sreg = sregs [srcindex];
14782 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14783 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14784 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14785 MonoInst *use_ins = ins;
14786 MonoInst *load_ins;
14787 guint32 load_opcode;
14789 if (var->opcode == OP_REGVAR) {
14790 sregs [srcindex] = var->dreg;
14791 //mono_inst_set_src_registers (ins, sregs);
14792 live_range_end [sreg] = use_ins;
14793 live_range_end_bb [sreg] = bb;
14795 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14798 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14799 /* var->dreg is a hreg */
14800 tmp->inst_c1 = sreg;
14801 mono_bblock_insert_after_ins (bb, ins, tmp);
14807 g_assert (var->opcode == OP_REGOFFSET);
14809 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14811 g_assert (load_opcode != OP_LOADV_MEMBASE);
14813 if (vreg_to_lvreg [sreg]) {
14814 g_assert (vreg_to_lvreg [sreg] != -1);
14816 /* The variable is already loaded to an lvreg */
14817 if (G_UNLIKELY (cfg->verbose_level > 2))
14818 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14819 sregs [srcindex] = vreg_to_lvreg [sreg];
14820 //mono_inst_set_src_registers (ins, sregs);
14824 /* Try to fuse the load into the instruction */
14825 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14826 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14827 sregs [0] = var->inst_basereg;
14828 //mono_inst_set_src_registers (ins, sregs);
14829 ins->inst_offset = var->inst_offset;
14830 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14831 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14832 sregs [1] = var->inst_basereg;
14833 //mono_inst_set_src_registers (ins, sregs);
14834 ins->inst_offset = var->inst_offset;
14836 if (MONO_IS_REAL_MOVE (ins)) {
14837 ins->opcode = OP_NOP;
14840 //printf ("%d ", srcindex); mono_print_ins (ins);
14842 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14844 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14845 if (var->dreg == prev_dreg) {
14847 * sreg refers to the value loaded by the load
14848 * emitted below, but we need to use ins->dreg
14849 * since it refers to the store emitted earlier.
14853 g_assert (sreg != -1);
14854 vreg_to_lvreg [var->dreg] = sreg;
14855 g_assert (lvregs_len < 1024);
14856 lvregs [lvregs_len ++] = var->dreg;
14860 sregs [srcindex] = sreg;
14861 //mono_inst_set_src_registers (ins, sregs);
14863 #if SIZEOF_REGISTER != 8
14864 if (regtype == 'l') {
14865 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14866 mono_bblock_insert_before_ins (bb, ins, load_ins);
14867 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14868 mono_bblock_insert_before_ins (bb, ins, load_ins);
14869 use_ins = load_ins;
14874 #if SIZEOF_REGISTER == 4
14875 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14877 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14878 mono_bblock_insert_before_ins (bb, ins, load_ins);
14879 use_ins = load_ins;
14883 if (var->dreg < orig_next_vreg) {
14884 live_range_end [var->dreg] = use_ins;
14885 live_range_end_bb [var->dreg] = bb;
14888 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14891 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14892 tmp->inst_c1 = var->dreg;
14893 mono_bblock_insert_after_ins (bb, ins, tmp);
14897 mono_inst_set_src_registers (ins, sregs);
14899 if (dest_has_lvreg) {
14900 g_assert (ins->dreg != -1);
14901 vreg_to_lvreg [prev_dreg] = ins->dreg;
14902 g_assert (lvregs_len < 1024);
14903 lvregs [lvregs_len ++] = prev_dreg;
14904 dest_has_lvreg = FALSE;
14908 tmp_reg = ins->dreg;
14909 ins->dreg = ins->sreg2;
14910 ins->sreg2 = tmp_reg;
14913 if (MONO_IS_CALL (ins)) {
14914 /* Clear vreg_to_lvreg array */
14915 for (i = 0; i < lvregs_len; i++)
14916 vreg_to_lvreg [lvregs [i]] = 0;
14918 } else if (ins->opcode == OP_NOP) {
14920 MONO_INST_NULLIFY_SREGS (ins);
14923 if (cfg->verbose_level > 2)
14924 mono_print_ins_index (1, ins);
14927 /* Extend the live range based on the liveness info */
14928 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14929 for (i = 0; i < cfg->num_varinfo; i ++) {
14930 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14932 if (vreg_is_volatile (cfg, vi->vreg))
14933 /* The liveness info is incomplete */
14936 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14937 /* Live from at least the first ins of this bb */
14938 live_range_start [vi->vreg] = bb->code;
14939 live_range_start_bb [vi->vreg] = bb;
14942 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14943 /* Live at least until the last ins of this bb */
14944 live_range_end [vi->vreg] = bb->last_ins;
14945 live_range_end_bb [vi->vreg] = bb;
14952 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14953 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14955 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14956 for (i = 0; i < cfg->num_varinfo; ++i) {
14957 int vreg = MONO_VARINFO (cfg, i)->vreg;
14960 if (live_range_start [vreg]) {
14961 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14963 ins->inst_c1 = vreg;
14964 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14966 if (live_range_end [vreg]) {
14967 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14969 ins->inst_c1 = vreg;
14970 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14971 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14973 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14978 if (cfg->gsharedvt_locals_var_ins) {
14979 /* Nullify if unused */
14980 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14981 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14984 g_free (live_range_start);
14985 g_free (live_range_end);
14986 g_free (live_range_start_bb);
14987 g_free (live_range_end_bb);
/*
 * mono_decompose_typecheck:
 *
 *   Lower a single OP_ISINST/OP_CASTCLASS instruction INS (sitting in bblock
 * BB) into an expanded IR sequence built in a fresh bblock chain, then splice
 * that chain into BB in place of INS via mono_replace_ins ().
 *
 *   Three lowering strategies are visible here:
 *   - cached nonshared isinst/castclass helpers, for classes with a
 *     reference-variant generic argument;
 *   - inlining a marshal wrapper, for MarshalByRef or interface classes;
 *   - the generic handle_isinst ()/handle_castclass () paths otherwise.
 *
 * NOTE(review): the embedded original line numbers in this excerpt are
 * non-contiguous, so some statements (opening brace, several else branches,
 * closing brace) are elided; the comments below describe only what is visible.
 */
14991 mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
14993 MonoInst *ret, *move, *source;
14994 MonoClass *klass = ins->klass;
/* Nonzero when KLASS needs runtime generic context information. */
14995 int context_used = mini_class_check_context_used (cfg, klass);
14996 int is_isinst = ins->opcode == OP_ISINST;
/* Only these two opcodes are routed here by the decompose driver. */
14997 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
/* The lowered code needs a variable for the object being tested; create one
 * for sreg1 if it does not already have a backing MonoInst. */
14998 source = get_vreg_to_inst (cfg, ins->sreg1);
14999 if (!source || source == (MonoInst *) -1)
15000 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15001 g_assert (source && source != (MonoInst *) -1);
/* Emit the replacement IR into a fresh bblock chain rooted at first_bb. */
15003 MonoBasicBlock *first_bb;
15004 NEW_BBLOCK (cfg, first_bb);
15005 cfg->cbb = first_bb;
15007 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
/* Variant generic argument: use the cached-helper lowering. */
15009 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15011 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
15012 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
/* MarshalByRef/interface: inline the managed marshal wrapper instead. */
15013 MonoInst *iargs [1];
15016 iargs [0] = source;
15018 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15019 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15021 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
/* Record cast details for better InvalidCastException diagnostics, and
 * clear them once the wrapper has been inlined. */
15022 save_cast_details (cfg, klass, source->dreg, TRUE);
15023 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15024 reset_cast_details (cfg);
/* Wrapper inlining is expected to always succeed here. */
15026 g_assert (costs > 0);
/* Generic path (also used when context_used != 0). */
15030 ret = handle_isinst (cfg, klass, source, context_used);
15032 ret = handle_castclass (cfg, klass, source, context_used);
/* Copy the lowered result into the original instruction's dreg so users of
 * INS's destination see the same value. */
15034 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
/* The lowering must have produced at least one instruction somewhere. */
15036 g_assert (cfg->cbb->code || first_bb->code);
/* Splice the generated chain [first_bb, cfg->cbb] into BB, replacing INS. */
15037 MonoInst *prev = ins->prev;
15038 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/*
 * mono_decompose_typechecks:
 *
 *   Driver pass: walk every bblock of CFG and every instruction in each
 * bblock, dispatching type-check opcodes to mono_decompose_typecheck () for
 * lowering (visibly OP_ISINST/OP_CASTCLASS via the switch below).
 *
 * NOTE(review): this excerpt elides interleaved lines (case labels, braces,
 * the verbose-dump epilogue); comments describe only what is visible.
 */
15042 mono_decompose_typechecks (MonoCompile *cfg)
15044 for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15046 MONO_BB_FOR_EACH_INS (bb, ins) {
15047 switch (ins->opcode) {
/* Type-check opcodes get lowered in place. */
15050 mono_decompose_typecheck (cfg, bb, ins);
15060 * - use 'iadd' instead of 'int_add'
15061 * - handling ovf opcodes: decompose in method_to_ir.
15062 * - unify iregs/fregs
15063 * -> partly done, the missing parts are:
15064 * - a more complete unification would involve unifying the hregs as well, so
15065 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15066 * would no longer map to the machine hregs, so the code generators would need to
15067 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15068 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15069 * fp/non-fp branches speeds it up by about 15%.
15070 * - use sext/zext opcodes instead of shifts
15072 * - get rid of TEMPLOADs if possible and use vregs instead
15073 * - clean up usage of OP_P/OP_ opcodes
15074 * - cleanup usage of DUMMY_USE
15075 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15077 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15078 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15079 * - make sure handle_stack_args () is called before the branch is emitted
15080 * - when the new IR is done, get rid of all unused stuff
15081 * - COMPARE/BEQ as separate instructions or unify them ?
15082 * - keeping them separate allows specialized compare instructions like
15083 * compare_imm, compare_membase
15084 * - most back ends unify fp compare+branch, fp compare+ceq
15085 * - integrate mono_save_args into inline_method
15086 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15087 * - handle long shift opts on 32 bit platforms somehow: they require
15088 * 3 sregs (2 for arg1 and 1 for arg2)
15089 * - make byref a 'normal' type.
15090 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15091 * variable if needed.
15092 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15093 * like inline_method.
15094 * - remove inlining restrictions
15095 * - fix LNEG and enable cfold of INEG
15096 * - generalize x86 optimizations like ldelema as a peephole optimization
15097 * - add store_mem_imm for amd64
15098 * - optimize the loading of the interruption flag in the managed->native wrappers
15099 * - avoid special handling of OP_NOP in passes
15100 * - move code inserting instructions into one function/macro.
15101 * - try a coalescing phase after liveness analysis
15102 * - add float -> vreg conversion + local optimizations on !x86
15103 * - figure out how to handle decomposed branches during optimizations, ie.
15104 * compare+branch, op_jump_table+op_br etc.
15105 * - promote RuntimeXHandles to vregs
15106 * - vtype cleanups:
15107 * - add a NEW_VARLOADA_VREG macro
15108 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15109 * accessing vtype fields.
15110 * - get rid of I8CONST on 64 bit platforms
15111 * - dealing with the increase in code size due to branches created during opcode
15113 * - use extended basic blocks
15114 * - all parts of the JIT
15115 * - handle_global_vregs () && local regalloc
15116 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15117 * - sources of increase in code size:
15120 * - isinst and castclass
15121 * - lvregs not allocated to global registers even if used multiple times
15122 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15124 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15125 * - add all micro optimizations from the old JIT
15126 * - put tree optimizations into the deadce pass
15127 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15128 * specific function.
15129 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15130 * fcompare + branchCC.
15131 * - create a helper function for allocating a stack slot, taking into account
15132 * MONO_CFG_HAS_SPILLUP.
15134 * - merge the ia64 switch changes.
15135 * - optimize mono_regstate2_alloc_int/float.
15136 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15137 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15138 * parts of the tree could be separated by other instructions, killing the tree
15139 * arguments, or stores killing loads etc. Also, should we fold loads into other
15140 * instructions if the result of the load is used multiple times ?
15141 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15142 * - LAST MERGE: 108395.
15143 * - when returning vtypes in registers, generate IR and append it to the end of the
15144 * last bb instead of doing it in the epilog.
15145 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15153 - When to decompose opcodes:
15154 - earlier: this makes some optimizations hard to implement, since the low level IR
15155 no longer contains the necessary information. But it is easier to do.
15156 - later: harder to implement, enables more optimizations.
15157 - Branches inside bblocks:
15158 - created when decomposing complex opcodes.
15159 - branches to another bblock: harmless, but not tracked by the branch
15160 optimizations, so need to branch to a label at the start of the bblock.
15161 - branches to inside the same bblock: very problematic, trips up the local
15162 reg allocator. Can be fixed by splitting the current bblock, but that is a
15163 complex operation, since some local vregs can become global vregs etc.
15164 - Local/global vregs:
15165 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15166 local register allocator.
15167 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15168 structure, created by mono_create_var (). Assigned to hregs or the stack by
15169 the global register allocator.
15170 - When to do optimizations like alu->alu_imm:
15171 - earlier -> saves work later on since the IR will be smaller/simpler
15172 - later -> can work on more instructions
15173 - Handling of valuetypes:
15174 - When a vtype is pushed on the stack, a new temporary is created, an
15175 instruction computing its address (LDADDR) is emitted and pushed on
15176 the stack. Need to optimize cases when the vtype is used immediately as in
15177 argument passing, stloc etc.
15178 - Instead of the to_end stuff in the old JIT, simply call the function handling
15179 the values on the stack before emitting the last instruction of the bb.
15182 #endif /* DISABLE_JIT */