2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
28 #ifdef HAVE_SYS_TIME_H
36 #include <mono/utils/memcheck.h>
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
67 #include <mono/utils/mono-threads-coop.h>
73 #include "jit-icalls.h"
75 #include "debugger-agent.h"
76 #include "seq-points.h"
77 #include "aot-compiler.h"
78 #include "mini-llvm.h"
80 #define BRANCH_COST 10
81 #define INLINE_LENGTH_LIMIT 20
83 /* These have 'cfg' as an implicit argument */
84 #define INLINE_FAILURE(msg) do { \
85 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
86 inline_failure (cfg, msg); \
87 goto exception_exit; \
90 #define CHECK_CFG_EXCEPTION do {\
91 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE(method, field) do { \
95 field_access_failure ((cfg), (method), (field)); \
96 goto exception_exit; \
98 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
101 goto exception_exit; \
104 #define GSHAREDVT_FAILURE(opcode) do { \
105 if (cfg->gsharedvt) { \
106 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
107 goto exception_exit; \
110 #define OUT_OF_MEMORY_FAILURE do { \
111 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
112 mono_error_set_out_of_memory (&cfg->error, ""); \
113 goto exception_exit; \
115 #define DISABLE_AOT(cfg) do { \
116 if ((cfg)->verbose_level >= 2) \
117 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
118 (cfg)->disable_aot = TRUE; \
120 #define LOAD_ERROR do { \
121 break_on_unverified (); \
122 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
123 goto exception_exit; \
126 #define TYPE_LOAD_ERROR(klass) do { \
127 cfg->exception_ptr = klass; \
131 #define CHECK_CFG_ERROR do {\
132 if (!mono_error_ok (&cfg->error)) { \
133 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
134 goto mono_error_exit; \
138 /* Determine whenever 'ins' represents a load of the 'this' argument */
139 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
147 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
149 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
150 guchar *ip, guint real_offset, gboolean inline_always);
152 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
160 /* type loading helpers */
161 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
162 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
165 * Instruction metadata
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
212 mono_alloc_ireg (MonoCompile *cfg)
214 return alloc_ireg (cfg);
218 mono_alloc_lreg (MonoCompile *cfg)
220 return alloc_lreg (cfg);
224 mono_alloc_freg (MonoCompile *cfg)
226 return alloc_freg (cfg);
230 mono_alloc_preg (MonoCompile *cfg)
232 return alloc_preg (cfg);
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode used to copy a value of that type between vregs.
 * NOTE(review): this listing is elided — many case labels and returns between the
 * visible lines are missing from this view.
 */
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
/* strip custom modifiers / byref wrappers first */
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
/* r4 values get their own move opcode only when the r4fp mode is enabled */
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
/* enums move like their underlying integral type */
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
/* generic instances are classified by their generic type definition */
330 type = &type->data.generic_class->container_class->byval_arg;
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: dump BB's in-edges, out-edges and instruction list to stdout,
 * prefixed with MSG.
 */
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* finally print every instruction in the block */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook: when the 'break_on_unverified' debug option is set, stop in
 * the debugger whenever unverifiable IL is encountered.
 * NOTE(review): the body line under the 'if' is elided from this listing.
 */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
/*
 * field_access_failure:
 *
 *   Record a FieldAccessException on CFG when METHOD is not allowed to access
 * FIELD. Out-of-line (never inlined) to keep the fast path small.
 */
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
/* the full-name helpers return heap strings which we own */
384 g_free (method_fname);
385 g_free (field_fname);
/*
 * inline_failure:
 *
 *   Mark CFG as having failed inlining, optionally logging MSG when verbose.
 */
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Mark CFG as having failed gsharedvt compilation for OPCODE, recording a
 * descriptive message (with FILE:LINE of the failing site) on the cfg.
 */
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* cfg takes ownership of the allocated message */
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
415 * foo<T> (int i) { ldarg.0; box T; }
417 #define UNVERIFIED do { \
418 if (cfg->gsharedvt) { \
419 if (cfg->verbose_level > 2) \
420 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
422 goto exception_exit; \
424 break_on_unverified (); \
428 #define GET_BBLOCK(cfg,tblock,ip) do { \
429 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
431 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
432 NEW_BBLOCK (cfg, (tblock)); \
433 (tblock)->cil_code = (ip); \
434 ADD_BBLOCK (cfg, (tblock)); \
438 #if defined(TARGET_X86) || defined(TARGET_AMD64)
439 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
440 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
441 (dest)->dreg = alloc_ireg_mp ((cfg)); \
442 (dest)->sreg1 = (sr1); \
443 (dest)->sreg2 = (sr2); \
444 (dest)->inst_imm = (imm); \
445 (dest)->backend.shift_amount = (shift); \
446 MONO_ADD_INS ((cfg)->cbb, (dest)); \
450 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   Insert widening conversions so INS's two operands agree: r4 operands are
 * converted to r8 when mixed with r8, and (on 64 bit) i4 operands are
 * sign-extended when mixed with native-int. *ARG1_REF/*ARG2_REF are updated
 * to the converted instructions.
 * NOTE(review): this listing is elided — closing braces and some statements
 * between the visible lines are missing from this view.
 */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
/* sign-extend the 32 bit operand to pointer width */
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
493 #define ADD_BINOP(op) do { \
494 MONO_INST_NEW (cfg, ins, (op)); \
496 ins->sreg1 = sp [0]->dreg; \
497 ins->sreg2 = sp [1]->dreg; \
498 type_from_op (cfg, ins, sp [0], sp [1]); \
500 /* Have to insert a widening op */ \
501 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
502 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
503 MONO_ADD_INS ((cfg)->cbb, (ins)); \
504 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
507 #define ADD_UNOP(op) do { \
508 MONO_INST_NEW (cfg, ins, (op)); \
510 ins->sreg1 = sp [0]->dreg; \
511 type_from_op (cfg, ins, sp [0], NULL); \
513 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode (cfg, ins); \
518 #define ADD_BINCOND(next_block) do { \
521 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
522 cmp->sreg1 = sp [0]->dreg; \
523 cmp->sreg2 = sp [1]->dreg; \
524 type_from_op (cfg, cmp, sp [0], sp [1]); \
526 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
527 type_from_op (cfg, ins, sp [0], sp [1]); \
528 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
529 GET_BBLOCK (cfg, tblock, target); \
530 link_bblock (cfg, cfg->cbb, tblock); \
531 ins->inst_true_bb = tblock; \
532 if ((next_block)) { \
533 link_bblock (cfg, cfg->cbb, (next_block)); \
534 ins->inst_false_bb = (next_block); \
535 start_new_bblock = 1; \
537 GET_BBLOCK (cfg, tblock, ip); \
538 link_bblock (cfg, cfg->cbb, tblock); \
539 ins->inst_false_bb = tblock; \
540 start_new_bblock = 2; \
542 if (sp != stack_start) { \
543 handle_stack_args (cfg, stack_start, sp - stack_start); \
544 CHECK_UNVERIFIABLE (cfg); \
546 MONO_ADD_INS (cfg->cbb, cmp); \
547 MONO_ADD_INS (cfg->cbb, ins); \
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * the control flow ends to after 'from'.
/*
 * Adds TO to FROM's out_bb array and FROM to TO's in_bb array, unless the edge
 * already exists. The edge arrays are grown by allocating a one-larger copy
 * from the cfg mempool (never freed individually).
 * NOTE(review): this listing is elided — loop bodies, early returns and the
 * array-store tails are missing from this view.
 */
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
/* verbose edge tracing: IL offsets, or entry/exit for synthetic blocks */
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
/* skip if the out-edge is already present */
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
585 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
/* same dance for the in-edge on TO */
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause->flags. */
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
/* first pass: is OFFSET inside a filter or handler block? */
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* second pass: is OFFSET inside a try block? */
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE that a branch from IP to TARGET
 * leaves (IP is inside the clause, TARGET is not) — i.e. the handlers that
 * must run when control leaves their protected region.
 * NOTE(review): the return of the accumulated list is elided from this view.
 */
665 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
672 for (i = 0; i < header->num_clauses; ++i) {
673 clause = &header->clauses [i];
674 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
675 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
676 if (clause->flags == type)
677 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer save variable for REGION, creating and caching it
 * in cfg->spvars on first use.
 */
684 mono_create_spvar_for_region (MonoCompile *cfg, int region)
688 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
692 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
693 /* prevent it from being register allocated */
694 var->flags |= MONO_INST_VOLATILE;
696 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
700 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
702 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception-object variable for OFFSET, creating and caching it
 * in cfg->exvars on first use.
 */
706 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
710 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
714 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
715 /* prevent it from being register allocated */
716 var->flags |= MONO_INST_VOLATILE;
718 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
724 * Returns the type used in the eval stack when @type is loaded.
725 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets INST->type (and INST->klass) according to TYPE.
 * NOTE(review): this listing is elided — several case labels, breaks and
 * the byref early-out are missing between the visible lines.
 */
728 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
732 type = mini_get_underlying_type (type);
733 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
735 inst->type = STACK_MP;
740 switch (type->type) {
742 inst->type = STACK_INV;
750 inst->type = STACK_I4;
755 case MONO_TYPE_FNPTR:
756 inst->type = STACK_PTR;
758 case MONO_TYPE_CLASS:
759 case MONO_TYPE_STRING:
760 case MONO_TYPE_OBJECT:
761 case MONO_TYPE_SZARRAY:
762 case MONO_TYPE_ARRAY:
763 inst->type = STACK_OBJ;
767 inst->type = STACK_I8;
770 inst->type = cfg->r4_stack_type;
773 inst->type = STACK_R8;
775 case MONO_TYPE_VALUETYPE:
/* enums are loaded as their underlying integral type */
776 if (type->data.klass->enumtype) {
777 type = mono_class_enum_basetype (type->data.klass);
781 inst->type = STACK_VTYPE;
784 case MONO_TYPE_TYPEDBYREF:
785 inst->klass = mono_defaults.typed_reference_class;
786 inst->type = STACK_VTYPE;
788 case MONO_TYPE_GENERICINST:
789 type = &type->data.generic_class->container_class->byval_arg;
793 g_assert (cfg->gshared);
/* gsharedvt type variables are treated as value types of unknown size */
794 if (mini_is_gsharedvt_type (type)) {
795 g_assert (cfg->gsharedvt);
796 inst->type = STACK_VTYPE;
798 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
802 g_error ("unknown type 0x%02x in eval stack type", type->type);
807 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Tables indexed by eval-stack type (STACK_INV, I4, I8, PTR, R8, MP, OBJ,
 * VTYPE, R4). The bin_*/shift tables give the result type of a binary op on
 * two stack types (STACK_INV = invalid IL); the *_op_map tables give the
 * delta added to a generic CEE_/OP_ opcode to obtain the type-specific
 * opcode for a given operand type.
 * NOTE(review): this listing is elided — the variable declaration lines
 * (static const ... ) and some initializer rows are missing from this view.
 */
810 bin_num_table [STACK_MAX] [STACK_MAX] = {
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
816 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* result type of a unary negation per operand stack type */
824 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
827 /* reduce the size of this table */
829 bin_int_table [STACK_MAX] [STACK_MAX] = {
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* 0 = invalid comparison; non-zero values encode which comparisons are legal */
841 bin_comp_table [STACK_MAX] [STACK_MAX] = {
842 /* Inv i L p F & O vt r4 */
844 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
845 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
846 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
847 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
848 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
849 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
850 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
851 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
854 /* reduce the size of this table */
856 shift_table [STACK_MAX] [STACK_MAX] = {
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
868 * Tables to map from the non-specific opcode to the matching
869 * type-specific opcode.
871 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
873 binops_op_map [STACK_MAX] = {
874 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
877 /* handles from CEE_NEG to CEE_CONV_U8 */
879 unops_op_map [STACK_MAX] = {
880 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
883 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
885 ovfops_op_map [STACK_MAX] = {
886 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
889 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
891 ovf2ops_op_map [STACK_MAX] = {
892 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
895 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
897 ovf3ops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
901 /* handles from CEE_BEQ to CEE_BLT_UN */
903 beqops_op_map [STACK_MAX] = {
904 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
907 /* handles from CEE_CEQ to CEE_CLT_UN */
909 ceqops_op_map [STACK_MAX] = {
910 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
914 * Sets ins->type (the type on the eval stack) according to the
915 * type of the opcode and the arguments to it.
916 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
918 * FIXME: this function sets ins->type unconditionally in some cases, but
919 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Also rewrites INS->opcode from the generic CEE_/OP_ form to the
 * type-specific form using the *_op_map tables above.
 * NOTE(review): this listing is elided — most case labels, breaks and some
 * nested branches are missing between the visible lines.
 */
922 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
924 switch (ins->opcode) {
931 /* FIXME: check unverifiable args for STACK_MP */
932 ins->type = bin_num_table [src1->type] [src2->type];
933 ins->opcode += binops_op_map [ins->type];
940 ins->type = bin_int_table [src1->type] [src2->type];
941 ins->opcode += binops_op_map [ins->type];
946 ins->type = shift_table [src1->type] [src2->type];
947 ins->opcode += binops_op_map [ins->type];
/* comparisons: pick the compare opcode from the operand width */
952 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
953 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
954 ins->opcode = OP_LCOMPARE;
955 else if (src1->type == STACK_R4)
956 ins->opcode = OP_RCOMPARE;
957 else if (src1->type == STACK_R8)
958 ins->opcode = OP_FCOMPARE;
960 ins->opcode = OP_ICOMPARE;
962 case OP_ICOMPARE_IMM:
963 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE_IMM;
977 ins->opcode += beqops_op_map [src1->type];
980 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
981 ins->opcode += ceqops_op_map [src1->type];
987 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
/* unary operators */
992 ins->type = neg_table [src1->type];
993 ins->opcode += unops_op_map [ins->type];
996 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
997 ins->type = src1->type;
999 ins->type = STACK_INV;
1000 ins->opcode += unops_op_map [ins->type];
1006 ins->type = STACK_I4;
1007 ins->opcode += unops_op_map [src1->type];
1010 ins->type = STACK_R8;
1011 switch (src1->type) {
1014 ins->opcode = OP_ICONV_TO_R_UN;
1017 ins->opcode = OP_LCONV_TO_R_UN;
/* overflow-checking conversions */
1021 case CEE_CONV_OVF_I1:
1022 case CEE_CONV_OVF_U1:
1023 case CEE_CONV_OVF_I2:
1024 case CEE_CONV_OVF_U2:
1025 case CEE_CONV_OVF_I4:
1026 case CEE_CONV_OVF_U4:
1027 ins->type = STACK_I4;
1028 ins->opcode += ovf3ops_op_map [src1->type];
1030 case CEE_CONV_OVF_I_UN:
1031 case CEE_CONV_OVF_U_UN:
1032 ins->type = STACK_PTR;
1033 ins->opcode += ovf2ops_op_map [src1->type];
1035 case CEE_CONV_OVF_I1_UN:
1036 case CEE_CONV_OVF_I2_UN:
1037 case CEE_CONV_OVF_I4_UN:
1038 case CEE_CONV_OVF_U1_UN:
1039 case CEE_CONV_OVF_U2_UN:
1040 case CEE_CONV_OVF_U4_UN:
1041 ins->type = STACK_I4;
1042 ins->opcode += ovf2ops_op_map [src1->type];
1045 ins->type = STACK_PTR;
1046 switch (src1->type) {
1048 ins->opcode = OP_ICONV_TO_U;
1052 #if SIZEOF_VOID_P == 8
1053 ins->opcode = OP_LCONV_TO_U;
1055 ins->opcode = OP_MOVE;
1059 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_FCONV_TO_U;
1068 ins->type = STACK_I8;
1069 ins->opcode += unops_op_map [src1->type];
1071 case CEE_CONV_OVF_I8:
1072 case CEE_CONV_OVF_U8:
1073 ins->type = STACK_I8;
1074 ins->opcode += ovf3ops_op_map [src1->type];
1076 case CEE_CONV_OVF_U8_UN:
1077 case CEE_CONV_OVF_I8_UN:
1078 ins->type = STACK_I8;
1079 ins->opcode += ovf2ops_op_map [src1->type];
1082 ins->type = cfg->r4_stack_type;
1083 ins->opcode += unops_op_map [src1->type];
1086 ins->type = STACK_R8;
1087 ins->opcode += unops_op_map [src1->type];
1090 ins->type = STACK_R8;
1094 ins->type = STACK_I4;
1095 ins->opcode += ovfops_op_map [src1->type];
1098 case CEE_CONV_OVF_I:
1099 case CEE_CONV_OVF_U:
1100 ins->type = STACK_PTR;
1101 ins->opcode += ovfops_op_map [src1->type];
/* checked arithmetic: no float variants exist, so R8 result is invalid IL */
1104 case CEE_ADD_OVF_UN:
1106 case CEE_MUL_OVF_UN:
1108 case CEE_SUB_OVF_UN:
1109 ins->type = bin_num_table [src1->type] [src2->type];
1110 ins->opcode += ovfops_op_map [src1->type];
1111 if (ins->type == STACK_R8)
1112 ins->type = STACK_INV;
1114 case OP_LOAD_MEMBASE:
1115 ins->type = STACK_PTR;
1117 case OP_LOADI1_MEMBASE:
1118 case OP_LOADU1_MEMBASE:
1119 case OP_LOADI2_MEMBASE:
1120 case OP_LOADU2_MEMBASE:
1121 case OP_LOADI4_MEMBASE:
1122 case OP_LOADU4_MEMBASE:
1123 ins->type = STACK_PTR;
1125 case OP_LOADI8_MEMBASE:
1126 ins->type = STACK_I8;
1128 case OP_LOADR4_MEMBASE:
1129 ins->type = cfg->r4_stack_type;
1131 case OP_LOADR8_MEMBASE:
1132 ins->type = STACK_R8;
1135 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1139 if (ins->type == STACK_MP)
1140 ins->klass = mono_defaults.object_class;
1145 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1151 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the eval-stack types of ARGS are compatible with SIG's
 * parameter types (used when checking call arguments).
 * NOTE(review): this listing is elided — the return statements and many case
 * labels between the visible lines are missing from this view.
 */
1156 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1161 switch (args->type) {
1171 for (i = 0; i < sig->param_count; ++i) {
1172 switch (args [i].type) {
1176 if (!sig->params [i]->byref)
1180 if (sig->params [i]->byref)
1182 switch (sig->params [i]->type) {
1183 case MONO_TYPE_CLASS:
1184 case MONO_TYPE_STRING:
1185 case MONO_TYPE_OBJECT:
1186 case MONO_TYPE_SZARRAY:
1187 case MONO_TYPE_ARRAY:
/* floating point stack entries must map to R4/R8 parameters */
1194 if (sig->params [i]->byref)
1196 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1205 /*if (!param_table [args [i].type] [sig->params [i]->type])
1213 * When we need a pointer to the current domain many times in a method, we
1214 * call mono_domain_get() once and we store the result in a local variable.
1215 * This function returns the variable that represents the MonoDomain*.
1217 inline static MonoInst *
1218 mono_get_domainvar (MonoCompile *cfg)
1220 if (!cfg->domainvar)
1221 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1222 return cfg->domainvar;
1226 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *
 *   Return the variable caching the GOT address, creating it on first use.
 * NOTE(review): the early 'return NULL' line for the non-AOT case is elided
 * from this listing.
 */
1230 mono_get_got_var (MonoCompile *cfg)
/* only needed when AOT compiling and the backend wants an explicit got var */
1232 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1234 if (!cfg->got_var) {
1235 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1237 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Return the rgctx variable used by gshared code, creating it on first use.
 * Only valid when compiling with generic sharing enabled.
 */
1241 mono_get_vtable_var (MonoCompile *cfg)
1243 g_assert (cfg->gshared);
1245 if (!cfg->rgctx_var) {
1246 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 /* force the var to be stack allocated */
1248 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1251 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map INS's eval-stack type back to a MonoType. For STACK_OBJ/STACK_VTYPE
 * the instruction's klass supplies the concrete type.
 */
1255 type_from_stack_type (MonoInst *ins) {
1256 switch (ins->type) {
1257 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1258 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1259 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1260 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1261 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: byref variant of the instruction's class */
1263 return &ins->klass->this_arg;
1264 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1265 case STACK_VTYPE: return &ins->klass->byval_arg;
1267 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map T to its eval-stack type constant (STACK_I4, STACK_OBJ, ...).
 * NOTE(review): this listing is elided — many case labels and returns
 * between the visible lines are missing from this view.
 */
1272 static G_GNUC_UNUSED int
1273 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1275 t = mono_type_get_underlying_type (t);
1287 case MONO_TYPE_FNPTR:
1289 case MONO_TYPE_CLASS:
1290 case MONO_TYPE_STRING:
1291 case MONO_TYPE_OBJECT:
1292 case MONO_TYPE_SZARRAY:
1293 case MONO_TYPE_ARRAY:
1299 return cfg->r4_stack_type;
1302 case MONO_TYPE_VALUETYPE:
1303 case MONO_TYPE_TYPEDBYREF:
1305 case MONO_TYPE_GENERICINST:
1306 if (mono_type_generic_inst_is_valuetype (t))
1312 g_assert_not_reached ();
/* Maps a CIL array element access opcode (CEE_LDELEM_* / CEE_STELEM_*) to
 * the MonoClass of the element type it operates on. NOTE(review): most
 * case labels are elided in this view — only the LDELEM_REF/STELEM_REF
 * labels are visible; each return below belongs to an elided label. */
1319 array_access_to_klass (int opcode)
1323 return mono_defaults.byte_class;
1325 return mono_defaults.uint16_class;
1328 return mono_defaults.int_class;
1331 return mono_defaults.sbyte_class;
1334 return mono_defaults.int16_class;
1337 return mono_defaults.int32_class;
1339 return mono_defaults.uint32_class;
1342 return mono_defaults.int64_class;
1345 return mono_defaults.single_class;
1348 return mono_defaults.double_class;
1349 case CEE_LDELEM_REF:
1350 case CEE_STELEM_REF:
1351 return mono_defaults.object_class;
1353 g_assert_not_reached ();
1359 * We try to share variables when possible
/* Returns (or creates) the local variable used to spill stack slot 'slot'
 * holding a value of ins's stack type across basic-block boundaries.
 * Integer-like slots are cached in cfg->intvars keyed by (type, slot) so
 * the same temp is reused; other types always get a fresh var. */
1362 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1367 /* inlining can result in deeper stacks */
1368 if (slot >= cfg->header->max_stack)
1369 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the intvars cache: one entry per (stack type, slot) pair. */
1371 pos = ins->type - 1 + slot * STACK_MAX;
1373 switch (ins->type) {
1380 if ((vnum = cfg->intvars [pos]))
1381 return cfg->varinfo [vnum];
1382 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 cfg->intvars [pos] = res->inst_c0;
1386 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Records (image, token) for 'key' in cfg->token_info_hash so the AOT
 * compiler can later resolve the item from metadata alone. Skipped when a
 * generic context is active, since image+token would then be ambiguous. */
1392 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1395 * Don't use this if a generic_context is set, since that means AOT can't
1396 * look up the method using just the image+token.
1397 * table == 0 means this is a reference made from a wrapper.
1399 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1400 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1401 jump_info_token->image = image;
1402 jump_info_token->token = token;
1403 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1408 * This function is called to handle items that are left on the evaluation stack
1409 * at basic block boundaries. What happens is that we save the values to local variables
1410 * and we reload them later when first entering the target basic block (with the
1411 * handle_loaded_temps () function).
1412 * A single joint point will use the same variables (stored in the array bb->out_stack or
1413 * bb->in_stack, if the basic block is before or after the joint point).
1415 * This function needs to be called _before_ emitting the last instruction of
1416 * the bb (i.e. before emitting a branch).
1417 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): several control-flow statements (loop breaks/continues,
 * else branches, closing braces) are elided in this view; the comments
 * below describe only what the visible lines establish. */
1420 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1423 MonoBasicBlock *bb = cfg->cbb;
1424 MonoBasicBlock *outb;
1425 MonoInst *inst, **locals;
1430 if (cfg->verbose_level > 3)
1431 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with a non-empty stack: pick/build the out_stack array. */
1432 if (!bb->out_scount) {
1433 bb->out_scount = count;
1434 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor that already has one. */
1436 for (i = 0; i < bb->out_count; ++i) {
1437 outb = bb->out_bb [i];
1438 /* exception handlers are linked, but they should not be considered for stack args */
1439 if (outb->flags & BB_EXCEPTION_HANDLER)
1441 //printf (" %d", outb->block_num);
1442 if (outb->in_stack) {
1444 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate a fresh out_stack. */
1450 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1451 for (i = 0; i < count; ++i) {
1453 * try to reuse temps already allocated for this purpouse, if they occupy the same
1454 * stack slot and if they are of the same type.
1455 * This won't cause conflicts since if 'local' is used to
1456 * store one of the values in the in_stack of a bblock, then
1457 * the same variable will be used for the same outgoing stack
1459 * This doesn't work when inlining methods, since the bblocks
1460 * in the inlined methods do not inherit their in_stack from
1461 * the bblock they are inlined to. See bug #58863 for an
1464 if (cfg->inlined_method)
1465 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1467 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen stack shape to every (non-handler) successor;
 * a depth mismatch at a join point makes the method unverifiable. */
1472 for (i = 0; i < bb->out_count; ++i) {
1473 outb = bb->out_bb [i];
1474 /* exception handlers are linked, but they should not be considered for stack args */
1475 if (outb->flags & BB_EXCEPTION_HANDLER)
1477 if (outb->in_scount) {
1478 if (outb->in_scount != bb->out_scount) {
1479 cfg->unverifiable = TRUE;
1482 continue; /* check they are the same locals */
1484 outb->in_scount = count;
1485 outb->in_stack = bb->out_stack;
1488 locals = bb->out_stack;
/* Spill each stack value to its temp and replace the stack entry with
 * the temp so later code reads the local, not the dead value. */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1499 * It is possible that the out bblocks already have in_stack assigned, and
1500 * the in_stacks differ. In this case, we will store to all the different
1507 /* Find a bblock which has a different in_stack */
1509 while (bindex < bb->out_count) {
1510 outb = bb->out_bb [bindex];
1511 /* exception handlers are linked, but they should not be considered for stack args */
1512 if (outb->flags & BB_EXCEPTION_HANDLER) {
1516 if (outb->in_stack != locals) {
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1524 locals = outb->in_stack;
/* Emits an instruction that loads a runtime constant described by
 * (patch_type, data). Under AOT this becomes an AOTCONST patch resolved at
 * load time; otherwise the patch is resolved immediately and emitted as a
 * plain pointer constant. Returns the emitted MonoInst. */
1534 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1538 if (cfg->compile_aot) {
1539 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1545 ji.type = patch_type;
1546 ji.data.target = data;
1547 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1548 mono_error_assert_ok (&error);
1550 EMIT_NEW_PCONST (cfg, ins, target);
/* Emits IR that loads into intf_bit_reg a nonzero value iff the interface
 * bitmap found at [base_reg + offset] has the bit for klass's interface id
 * set. Three strategies: a helper icall when the bitmap is compressed; a
 * runtime-computed byte/bit pair under AOT (the interface id is only known
 * at load time); or a constant byte index and mask otherwise. */
1556 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1558 int ibitmap_reg = alloc_preg (cfg);
1559 #ifdef COMPRESSED_INTERFACE_BITMAP
1561 MonoInst *res, *ins;
1562 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1563 MONO_ADD_INS (cfg->cbb, ins);
1565 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1566 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1567 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1569 int ibitmap_byte_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1573 if (cfg->compile_aot) {
1574 int iid_reg = alloc_preg (cfg);
1575 int shifted_iid_reg = alloc_preg (cfg);
1576 int ibitmap_byte_address_reg = alloc_preg (cfg);
1577 int masked_iid_reg = alloc_preg (cfg);
1578 int iid_one_bit_reg = alloc_preg (cfg);
1579 int iid_bit_reg = alloc_preg (cfg);
/* byte = bitmap[iid >> 3]; bit = 1 << (iid & 7); result = byte & bit */
1580 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1582 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1585 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1586 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1587 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: interface_id is a compile-time constant. */
1589 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1596 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1597 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoClass. */
1600 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1602 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1606 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1607 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoVTable. */
1610 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1612 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1616 * Emit code which checks whenever the interface id of @klass is smaller than
1617 * than the value given by max_iid_reg.
/* On failure: branches to false_target if given, otherwise throws
 * InvalidCastException via a conditional-exception op. Under AOT the
 * interface id is loaded through an AOTCONST patch. */
1620 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1621 MonoBasicBlock *false_target)
1623 if (cfg->compile_aot) {
1624 int iid_reg = alloc_preg (cfg);
1625 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1626 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1633 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1636 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id into a fresh preg and delegates. */
1638 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1639 MonoBasicBlock *false_target)
1641 int max_iid_reg = alloc_preg (cfg);
1643 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1644 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1647 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id into a fresh preg and delegates. */
1649 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1650 MonoBasicBlock *false_target)
1652 int max_iid_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1655 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emits an "is instance" test based on the supertypes table: loads
 * supertypes[klass->idepth - 1] from the class in klass_reg and compares it
 * against klass (as a run-time MonoInst, an AOT class constant, or an
 * immediate). Branches to true_target on match; the idepth guard branches
 * to false_target when the hierarchy is too shallow. NOTE(review): the
 * branch selection between klass_ins / AOT / immediate depends on an
 * elided condition (presumably 'if (klass_ins)') — confirm. */
1659 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1661 int idepth_reg = alloc_preg (cfg);
1662 int stypes_reg = alloc_preg (cfg);
1663 int stype = alloc_preg (cfg);
1665 mono_class_setup_supertypes (klass);
/* Only emit the inheritance-depth guard when the fixed-size supertypes
 * table might be too short for klass's depth. */
1667 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1668 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1675 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1676 } else if (cfg->compile_aot) {
1677 int const_reg = alloc_preg (cfg);
1678 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1679 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without a run-time klass instruction. */
1687 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1689 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emits an interface cast check against a vtable: first the max-iid range
 * guard, then the interface bitmap bit test. A nonzero bit branches to
 * true_target (when given); otherwise a zero bit throws
 * InvalidCastException. */
1693 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1695 int intf_reg = alloc_preg (cfg);
1697 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1698 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1703 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1707 * Variant of the above that takes a register to the class, not the vtable.
/* Same shape as mini_emit_iface_cast, but reads max_interface_id and the
 * interface bitmap from a MonoClass* instead of a MonoVTable*. */
1710 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1712 int intf_bit_reg = alloc_preg (cfg);
1714 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1715 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1720 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emits an exact class-equality check: compares the class in klass_reg
 * against either a run-time MonoInst (klass_inst) or a runtime constant for
 * klass, throwing InvalidCastException on mismatch. */
1724 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1727 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1729 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1730 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1732 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a run-time klass inst. */
1736 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1738 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compares the class in klass_reg against klass (class constant under AOT,
 * immediate pointer otherwise) and branches to 'target' with the given
 * branch opcode. */
1742 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1744 if (cfg->compile_aot) {
1745 int const_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1755 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emits a castclass check for the class held in klass_reg against the
 * compile-time 'klass'. The visible array path checks rank equality, then
 * recursively checks the element (cast) class, with special-casing around
 * enums/System.Enum; the visible non-array path walks the supertypes table
 * like mini_emit_isninst_cast_inst but throws InvalidCastException instead
 * of branching. NOTE(review): the branch structure between the array and
 * non-array paths is partially elided in this view. */
1758 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1761 int rank_reg = alloc_preg (cfg);
1762 int eclass_reg = alloc_preg (cfg);
1764 g_assert (!klass_inst);
/* Rank must match exactly, otherwise the cast fails. */
1765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1767 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1768 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class comparisons: enums cast by their underlying rules, so
 * System.Object / System.Enum / ValueType element types get bespoke
 * checks rather than a plain supertype walk. */
1770 if (klass->cast_class == mono_defaults.object_class) {
1771 int parent_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1773 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1774 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1775 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1776 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class == mono_defaults.enum_class) {
1779 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1780 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1781 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1783 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1784 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY casts additionally require the object to be a vector
 * (bounds == NULL), not a multi-dimensional array of rank 1. */
1787 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1788 /* Check that the object is a vector too */
1789 int bounds_reg = alloc_preg (cfg);
1790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1792 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1795 int idepth_reg = alloc_preg (cfg);
1796 int stypes_reg = alloc_preg (cfg);
1797 int stype = alloc_preg (cfg);
1799 mono_class_setup_supertypes (klass);
1801 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1802 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1804 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1808 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without a run-time klass instruction. */
1813 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1815 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emits inline IR that zero-fills 'size' bytes at [destreg + offset].
 * Only val == 0 is supported (asserted). Small aligned sizes become a
 * single store-immediate; otherwise a constant-zero register is stored in
 * the widest naturally-aligned chunks available (8/4/2/1 bytes), with
 * register-wide stores allowed on unaligned-access-capable 64-bit
 * backends. NOTE(review): the loop/size-decrement statements between the
 * store macros are elided in this view. */
1819 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1823 g_assert (val == 0);
1828 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1834 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1839 #if SIZEOF_REGISTER == 8
1841 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1847 val_reg = alloc_preg (cfg);
1849 if (SIZEOF_REGISTER == 8)
1850 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1852 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1855 /* This could be optimized further if neccesary */
1857 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1864 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emits inline IR copying 'size' bytes from [srcreg + soffset] to
 * [destreg + doffset], using load/store pairs in the widest chunks the
 * alignment permits (8/4/2/1 bytes; 8-byte chunks also used on
 * unaligned-access-capable 64-bit backends). Size is bounded by an
 * assertion to avoid unbounded code expansion. NOTE(review): the
 * loop/offset-advance statements between the copy pairs are elided in
 * this view. */
1895 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1902 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1903 g_assert (size < 10000);
1906 /* This could be optimized further if neccesary */
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1917 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1929 cur_reg = alloc_preg (cfg);
1930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1937 cur_reg = alloc_preg (cfg);
1938 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1939 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1945 cur_reg = alloc_preg (cfg);
1946 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1947 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emits IR that stores sreg1 into the TLS slot identified by tls_key.
 * Under AOT the slot offset is materialized via a TLS-offset constant and
 * OP_TLS_SET_REG; otherwise the offset is resolved at compile time and
 * baked into an OP_TLS_SET instruction. */
1955 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1959 if (cfg->compile_aot) {
1960 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1961 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1963 ins->sreg2 = c->dreg;
1964 MONO_ADD_INS (cfg->cbb, ins);
1966 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1968 ins->inst_offset = mini_get_tls_offset (tls_key);
1969 MONO_ADD_INS (cfg->cbb, ins);
1976 * Emit IR to push the current LMF onto the LMF stack.
/* Links cfg->lmf_var into the thread's LMF list. Fast path: when the LMF
 * lives directly in TLS, save the old TLS value into lmf->previous_lmf and
 * store the new LMF back into TLS. Slow path: obtain lmf_addr (via TLS
 * intrinsics, an inlined pthread_getspecific, or the mono_get_lmf_addr
 * icall), cache it in cfg->lmf_addr_var, then splice the LMF in front of
 * *lmf_addr. NOTE(review): the #ifdef / else structure selecting between
 * these strategies is partially elided in this view. */
1979 emit_push_lmf (MonoCompile *cfg)
1982 * Emit IR to push the LMF:
1983 * lmf_addr = <lmf_addr from tls>
1984 * lmf->lmf_addr = lmf_addr
1985 * lmf->prev_lmf = *lmf_addr
1988 int lmf_reg, prev_lmf_reg;
1989 MonoInst *ins, *lmf_ins;
1994 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1995 /* Load current lmf */
1996 lmf_ins = mono_get_lmf_intrinsic (cfg);
1998 MONO_ADD_INS (cfg->cbb, lmf_ins);
1999 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2000 lmf_reg = ins->dreg;
2001 /* Save previous_lmf */
2002 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2004 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2007 * Store lmf_addr in a variable, so it can be allocated to a global register.
2009 if (!cfg->lmf_addr_var)
2010 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2013 ins = mono_get_jit_tls_intrinsic (cfg);
2015 int jit_tls_dreg = ins->dreg;
2017 MONO_ADD_INS (cfg->cbb, ins);
2018 lmf_reg = alloc_preg (cfg);
2019 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2021 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2024 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2026 MONO_ADD_INS (cfg->cbb, lmf_ins);
2029 MonoInst *args [16], *jit_tls_ins, *ins;
2031 /* Inline mono_get_lmf_addr () */
2032 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2034 /* Load mono_jit_tls_id */
2035 if (cfg->compile_aot)
2036 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2038 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2039 /* call pthread_getspecific () */
2040 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2041 /* lmf_addr = &jit_tls->lmf */
2042 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2045 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2049 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2051 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2052 lmf_reg = ins->dreg;
/* Splice the new LMF at the head of the list:
 * lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf; */
2054 prev_lmf_reg = alloc_preg (cfg);
2055 /* Save previous_lmf */
2056 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2057 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2059 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2066 * Emit IR to pop the current LMF from the LMF stack.
/* Inverse of emit_push_lmf: restores the previous LMF either directly into
 * TLS (fast path) or through the cached lmf_addr variable
 * (*(lmf->lmf_addr) = lmf->previous_lmf). */
2069 emit_pop_lmf (MonoCompile *cfg)
2071 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2077 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2078 lmf_reg = ins->dreg;
2080 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2081 /* Load previous_lmf */
2082 prev_lmf_reg = alloc_preg (cfg);
2083 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2085 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2088 * Emit IR to pop the LMF:
2089 * *(lmf->lmf_addr) = lmf->prev_lmf
2091 /* This could be called before emit_push_lmf () */
2092 if (!cfg->lmf_addr_var)
2093 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2094 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2096 prev_lmf_reg = alloc_preg (cfg);
2097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2098 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emits an enter/leave profiler callback ('func') taking the current
 * MonoMethod* as its only argument. Skipped for inlined bodies (so
 * profiling reflects the outer method) and gated on the
 * MONO_PROFILE_ENTER_LEAVE profiler option. */
2103 emit_instrumentation_call (MonoCompile *cfg, void *func)
2105 MonoInst *iargs [1];
2108 * Avoid instrumenting inlined methods since it can
2109 * distort profiling results.
2111 if (cfg->method != cfg->current_method)
2114 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2115 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2116 mono_emit_jit_icall (cfg, func, iargs);
/* Maps a method's return type to the call opcode family to emit, picking
 * the _REG variant for calli, the _MEMBASE variant for virtual calls, or
 * the plain opcode otherwise. Enums recurse through their base type and
 * generic instances through their container class. NOTE(review): a number
 * of case labels and `goto`-style recursion points are elided in this
 * view. */
2121 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2124 type = mini_get_underlying_type (type);
2125 switch (type->type) {
2126 case MONO_TYPE_VOID:
2127 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2134 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 case MONO_TYPE_FNPTR:
2139 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2140 case MONO_TYPE_CLASS:
2141 case MONO_TYPE_STRING:
2142 case MONO_TYPE_OBJECT:
2143 case MONO_TYPE_SZARRAY:
2144 case MONO_TYPE_ARRAY:
2145 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2148 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2151 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2153 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2155 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2156 case MONO_TYPE_VALUETYPE:
2157 if (type->data.klass->enumtype) {
/* Enum return: re-dispatch on the underlying integral type. */
2158 type = mono_class_enum_basetype (type->data.klass);
2161 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 case MONO_TYPE_TYPEDBYREF:
2163 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2164 case MONO_TYPE_GENERICINST:
2165 type = &type->data.generic_class->container_class->byval_arg;
2168 case MONO_TYPE_MVAR:
2170 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2172 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2177 //XXX this ignores if t is byref
/* True for primitive integral/char scalars (BOOLEAN..U8 plus native I/U).
 * Note: deliberately does NOT include R4/R8, and (per the XXX above) does
 * not consider t->byref. */
2178 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2181 * target_type_is_incompatible:
2182 * @cfg: MonoCompile context
2184 * Check that the item @arg on the evaluation stack can be stored
2185 * in the target type (can be a local, or field, etc).
2186 * The cfg arg can be used to check if we need verification or just
2189 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): the `return 0;` / `return 1;` statements following most of
 * the visible checks are elided in this view — each failed check returns
 * nonzero, each satisfied one returns 0 in the full source. */
2192 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2194 MonoType *simple_type;
2197 if (target->byref) {
2198 /* FIXME: check that the pointed to types match */
2199 if (arg->type == STACK_MP) {
2200 if (cfg->verbose_level) printf ("ok\n");
2201 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2202 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2203 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2205 /* if the target is native int& or same type */
2206 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2209 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2210 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2211 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2215 if (arg->type == STACK_PTR)
/* Non-byref targets: dispatch on the lowered target type and compare
 * against the stack tag (and, for value types, the exact klass). */
2220 simple_type = mini_get_underlying_type (target);
2221 switch (simple_type->type) {
2222 case MONO_TYPE_VOID:
2230 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2234 /* STACK_MP is needed when setting pinned locals */
2235 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_FNPTR:
2242 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2243 * in native int. (#688008).
2245 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2248 case MONO_TYPE_CLASS:
2249 case MONO_TYPE_STRING:
2250 case MONO_TYPE_OBJECT:
2251 case MONO_TYPE_SZARRAY:
2252 case MONO_TYPE_ARRAY:
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 if (arg->type != STACK_I8)
2263 if (arg->type != cfg->r4_stack_type)
2267 if (arg->type != STACK_R8)
2270 case MONO_TYPE_VALUETYPE:
2271 if (arg->type != STACK_VTYPE)
2273 klass = mono_class_from_mono_type (simple_type);
2274 if (klass != arg->klass)
2277 case MONO_TYPE_TYPEDBYREF:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_GENERICINST:
2285 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2286 MonoClass *target_class;
2287 if (arg->type != STACK_VTYPE)
2289 klass = mono_class_from_mono_type (simple_type);
2290 target_class = mono_class_from_mono_type (target);
2291 /* The second cases is needed when doing partial sharing */
2292 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2296 if (arg->type != STACK_OBJ)
2298 /* FIXME: check type compatibility */
2302 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
2303 g_assert (cfg->gshared);
2304 if (mini_type_var_is_vt (simple_type)) {
2305 if (arg->type != STACK_VTYPE)
2308 if (arg->type != STACK_OBJ)
2313 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2319 * Prepare arguments for passing to a function call.
2320 * Return a non-zero value if the arguments can't be passed to the given
2322 * The type checks are not yet complete and some conversions may need
2323 * casts on 32 or 64 bit architectures.
2325 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): as in target_type_is_incompatible, the `return 1;` /
 * `break;` statements after each visible check are elided in this view. */
2328 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2330 MonoType *simple_type;
/* 'this' (when present) must be an object, managed pointer, or native ptr. */
2334 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2338 for (i = 0; i < sig->param_count; ++i) {
2339 if (sig->params [i]->byref) {
2340 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2344 simple_type = mini_get_underlying_type (sig->params [i]);
2346 switch (simple_type->type) {
2347 case MONO_TYPE_VOID:
2356 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2362 case MONO_TYPE_FNPTR:
2363 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2366 case MONO_TYPE_CLASS:
2367 case MONO_TYPE_STRING:
2368 case MONO_TYPE_OBJECT:
2369 case MONO_TYPE_SZARRAY:
2370 case MONO_TYPE_ARRAY:
2371 if (args [i]->type != STACK_OBJ)
2376 if (args [i]->type != STACK_I8)
2380 if (args [i]->type != cfg->r4_stack_type)
2384 if (args [i]->type != STACK_R8)
2387 case MONO_TYPE_VALUETYPE:
2388 if (simple_type->data.klass->enumtype) {
/* Enum parameter: re-check against the underlying integral type. */
2389 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2392 if (args [i]->type != STACK_VTYPE)
2395 case MONO_TYPE_TYPEDBYREF:
2396 if (args [i]->type != STACK_VTYPE)
2399 case MONO_TYPE_GENERICINST:
2400 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2403 case MONO_TYPE_MVAR:
2405 if (args [i]->type != STACK_VTYPE)
2409 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps a *CALL_MEMBASE (virtual call) opcode to its direct-call
 * equivalent. NOTE(review): the `return OP_*;` line for each case is
 * elided in this view; see callvirt_to_call_reg below for the pattern. */
2417 callvirt_to_call (int opcode)
2420 case OP_CALL_MEMBASE:
2422 case OP_VOIDCALL_MEMBASE:
2424 case OP_FCALL_MEMBASE:
2426 case OP_RCALL_MEMBASE:
2428 case OP_VCALL_MEMBASE:
2430 case OP_LCALL_MEMBASE:
2433 g_assert_not_reached ();
/* Maps a *CALL_MEMBASE (virtual call) opcode to its indirect-call (_REG)
 * equivalent; aborts on any other opcode. */
2440 callvirt_to_call_reg (int opcode)
2443 case OP_CALL_MEMBASE:
2445 case OP_VOIDCALL_MEMBASE:
2446 return OP_VOIDCALL_REG;
2447 case OP_FCALL_MEMBASE:
2448 return OP_FCALL_REG;
2449 case OP_RCALL_MEMBASE:
2450 return OP_RCALL_REG;
2451 case OP_VCALL_MEMBASE:
2452 return OP_VCALL_REG;
2453 case OP_LCALL_MEMBASE:
2454 return OP_LCALL_REG;
2456 g_assert_not_reached ();
2462 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Arrange for the IMT/interface-dispatch hidden argument to be passed to
 *   CALL. If IMT_ARG is given its value is copied into a fresh register,
 *   otherwise a runtime constant for METHOD is materialized. Under LLVM the
 *   register is also recorded in call->imt_arg_reg.
 */
2464 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2468 if (COMPILE_LLVM (cfg)) {
2470 method_reg = alloc_preg (cfg);
2471 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2473 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2474 method_reg = ins->dreg;
2478 call->imt_arg_reg = method_reg;
/* MONO_ARCH_IMT_REG is the architecture's dedicated IMT register */
2480 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same logic, without recording imt_arg_reg */
2485 method_reg = alloc_preg (cfg);
2486 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2488 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2489 method_reg = ins->dreg;
2492 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill
 *   in its target. Memory is pool-owned; callers must not free it.
 *   NOTE(review): the assignments of ji->type and ji->ip are elided in this
 *   sampled view.
 */
2495 static MonoJumpInfo *
2496 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2498 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2502 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   JIT-side wrapper around mono_class_check_context_used (); CFG is
 *   presumably consulted on an elided path (e.g. only meaningful under
 *   generic sharing) — confirm against the full source.
 */
2508 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2511 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   JIT-side wrapper around mono_method_check_context_used (); mirrors
 *   mini_class_check_context_used () above.
 */
2517 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2520 return mono_method_check_context_used (method);
2526 * check_method_sharing:
2528 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Results are written through OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may be NULL). */
2531 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2533 gboolean pass_vtable = FALSE;
2534 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods of generic classes may need the vtable argument */
2536 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2537 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2538 gboolean sharable = FALSE;
2540 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2544 * Pass vtable iff target method might
2545 * be shared, which means that sharing
2546 * is enabled for its class and its
2547 * context is sharable (and it's not a
/* ...generic method — generic methods get an mrgctx instead, below */
2550 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (those with a method_inst) take an mrgctx, never a vtable */
2554 if (mini_method_get_context (cmethod) &&
2555 mini_method_get_context (cmethod)->method_inst) {
2556 g_assert (!pass_vtable);
2558 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2561 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2566 if (out_pass_vtable)
2567 *out_pass_vtable = pass_vtable;
2568 if (out_pass_mrgctx)
2569 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   set up the return value (including vtype returns via OP_OUTARG_VTRETADDR),
 *   and let the backend lower the argument passing. CALLI selects indirect
 *   calls, VIRTUAL_ membase dispatch, TAIL tail calls, RGCTX/UNBOX_TRAMPOLINE
 *   request the respective hidden mechanisms. Does NOT add the call to a bblock.
 */
2572 inline static MonoCallInst *
2573 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2574 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2578 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify the profiler of method leave, then use OP_TAILCALL */
2586 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2588 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2590 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2593 call->signature = sig;
2594 call->rgctx_reg = rgctx;
2595 sig_ret = mini_get_underlying_type (sig->ret);
2597 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): the guard distinguishing these two mini_type_is_vtype branches
 * (presumably a cfg->vret_addr / ret-var-is-local check) is elided in this view. */
2600 if (mini_type_is_vtype (sig_ret)) {
2601 call->vret_var = cfg->vret_addr;
2602 //g_assert_not_reached ();
2604 } else if (mini_type_is_vtype (sig_ret)) {
2605 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2608 temp->backend.is_pinvoke = sig->pinvoke;
2611 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2612 * address of return value to increase optimization opportunities.
2613 * Before vtype decomposition, the dreg of the call ins itself represents the
2614 * fact the call modifies the return value. After decomposition, the call will
2615 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2616 * will be transformed into an LDADDR.
2618 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2619 loada->dreg = alloc_preg (cfg);
2620 loada->inst_p0 = temp;
2621 /* We reference the call too since call->dreg could change during optimization */
2622 loada->inst_p1 = call;
2623 MONO_ADD_INS (cfg->cbb, loada);
2625 call->inst.dreg = temp->dreg;
2627 call->vret_var = loada;
2628 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2629 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2631 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2632 if (COMPILE_SOFT_FLOAT (cfg)) {
2634 * If the call has a float argument, we would need to do an r8->r4 conversion using
2635 * an icall, but that cannot be done during the call sequence since it would clobber
2636 * the call registers + the stack. So we do it before emitting the call.
2638 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2640 MonoInst *in = call->args [i];
/* the implicit 'this' (index 0 when hasthis) is treated as a native int */
2642 if (i >= sig->hasthis)
2643 t = sig->params [i - sig->hasthis];
2645 t = &mono_defaults.int_class->byval_arg;
2646 t = mono_type_get_underlying_type (t);
2648 if (!t->byref && t->type == MONO_TYPE_R4) {
2649 MonoInst *iargs [1];
2653 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2655 /* The result will be in an int vreg */
2656 call->args [i] = conv;
2662 call->need_unbox_trampoline = unbox_trampoline;
/* lowering: LLVM backend vs. native architecture backend */
2665 if (COMPILE_LLVM (cfg))
2666 mono_llvm_emit_call (cfg, call);
2668 mono_arch_emit_call (cfg, call);
2670 mono_arch_emit_call (cfg, call);
/* track the largest outgoing parameter area and remember we make calls */
2673 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2674 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Pass the runtime-generic-context value (already in RGCTX_REG) to CALL
 *   via the architecture's dedicated RGCTX register, and mark the cfg/call
 *   accordingly.
 */
2680 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2682 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2683 cfg->uses_rgctx_reg = TRUE;
2684 call->rgctx_reg = TRUE;
2686 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 *   an IMT argument and/or an rgctx argument. For pinvoke wrappers with
 *   calling-convention checking enabled, saves the stack pointer before the
 *   call and verifies it afterwards, throwing ExecutionEngineException on
 *   mismatch. Returns the call instruction.
 */
2690 inline static MonoInst*
2691 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2696 gboolean check_sp = FALSE;
2698 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2699 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2701 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into a fresh register so it survives until set_rgctx_arg */
2706 rgctx_reg = mono_alloc_preg (cfg);
2707 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* lazily allocate the variable holding the pre-call stack pointer */
2711 if (!cfg->stack_inbalance_var)
2712 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2714 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2715 ins->dreg = cfg->stack_inbalance_var->dreg;
2716 MONO_ADD_INS (cfg->cbb, ins);
2719 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2721 call->inst.sreg1 = addr->dreg;
2724 emit_imt_argument (cfg, call, NULL, imt_arg);
2726 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* post-call: capture the current SP and compare against the saved one */
2731 sp_reg = mono_alloc_preg (cfg);
2733 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2735 MONO_ADD_INS (cfg->cbb, ins);
2737 /* Restore the stack so we don't crash when throwing the exception */
2738 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2739 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2740 MONO_ADD_INS (cfg->cbb, ins);
2742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2747 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2749 return (MonoInst*)call;
2753 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2756 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2758 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit IR for a call to METHOD with signature SIG and arguments ARGS.
 *   THIS_INS is the receiver for instance calls (NULL => non-virtual path),
 *   IMT_ARG/RGCTX_ARG are optional hidden arguments for interface dispatch
 *   and generic sharing. Handles remoting wrappers, delegate Invoke fast
 *   paths, devirtualization of sealed/final methods, and vtable/IMT
 *   dispatch. Returns the emitted call instruction.
 *   NOTE(review): this chunk is a sampled view; some original lines are not
 *   visible here.
 */
2761 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2762 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2764 #ifndef DISABLE_REMOTING
2765 gboolean might_be_remote = FALSE;
2767 gboolean virtual_ = this_ins != NULL;
2768 gboolean enable_for_aot = TRUE;
2771 MonoInst *call_target = NULL;
2773 gboolean need_unbox_trampoline;
2776 sig = mono_method_signature (method);
/* llvm-only mode never reaches here for interface methods */
2778 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2779 g_assert_not_reached ();
2782 rgctx_reg = mono_alloc_preg (cfg);
2783 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2786 if (method->string_ctor) {
2787 /* Create the real signature */
2788 /* FIXME: Cache these */
2789 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2790 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2795 context_used = mini_method_check_context_used (cfg, method);
2797 #ifndef DISABLE_REMOTING
/* a non-virtual instance call on a MarshalByRef (or object) receiver might cross a remoting proxy */
2798 might_be_remote = this_ins && sig->hasthis &&
2799 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2800 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2802 if (might_be_remote && context_used) {
2805 g_assert (cfg->gshared);
2807 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2809 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2813 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2814 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2816 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2818 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2820 #ifndef DISABLE_REMOTING
2821 if (might_be_remote)
2822 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2825 call->method = method;
2826 call->inst.flags |= MONO_INST_HAS_METHOD;
2827 call->inst.inst_left = this_ins;
2828 call->tail_call = tail;
2831 int vtable_reg, slot_reg, this_reg;
2834 this_reg = this_ins->dreg;
/* fast path for MulticastDelegate.Invoke: call through delegate->invoke_impl */
2836 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2837 MonoInst *dummy_use;
2839 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2841 /* Make a call to delegate->invoke_impl */
2842 call->inst.inst_basereg = this_reg;
2843 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2844 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2846 /* We must emit a dummy use here because the delegate trampoline will
2847 replace the 'this' argument with the delegate target making this activation
2848 no longer a root for the delegate.
2849 This is an issue for delegates that target collectible code such as dynamic
2850 methods of GC'able assemblies.
2852 For a test case look into #667921.
2854 FIXME: a dummy use is not the best way to do it as the local register allocator
2855 will put it on a caller save register and spil it around the call.
2856 Ideally, we would either put it on a callee save register or only do the store part.
2858 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2860 return (MonoInst*)call;
2863 if ((!cfg->compile_aot || enable_for_aot) &&
2864 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2865 (MONO_METHOD_IS_FINAL (method) &&
2866 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2867 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2869 * the method is not virtual, we just need to ensure this is not null
2870 * and then we can call the method directly.
2872 #ifndef DISABLE_REMOTING
2873 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2875 * The check above ensures method is not gshared, this is needed since
2876 * gshared methods can't have wrappers.
2878 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2882 if (!method->string_ctor)
2883 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2885 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2886 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2888 * the method is virtual, but we can statically dispatch since either
2889 * it's class or the method itself are sealed.
2890 * But first we need to ensure it's not a null reference.
2892 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2894 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2895 } else if (call_target) {
2896 vtable_reg = alloc_preg (cfg);
2897 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2899 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2900 call->inst.sreg1 = call_target->dreg;
/* FIX: was 'flags &= !MONO_INST_HAS_METHOD'. Logical NOT of a non-zero
 * constant is 0, so that cleared *every* flag bit. Bitwise complement
 * clears only the MONO_INST_HAS_METHOD bit, which is the intent when
 * dispatching through an explicit call target. */
2901 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2903 vtable_reg = alloc_preg (cfg);
2904 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2905 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2906 guint32 imt_slot = mono_method_get_imt_slot (method);
2907 emit_imt_argument (cfg, call, call->method, imt_arg);
2908 slot_reg = vtable_reg;
/* IMT entries live at negative offsets below the vtable */
2909 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2911 slot_reg = vtable_reg;
2912 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2913 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2915 g_assert (mono_method_signature (method)->generic_param_count);
2916 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 call->inst.sreg1 = slot_reg;
2921 call->inst.inst_offset = offset;
2922 call->is_virtual = TRUE;
2926 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2929 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2931 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: emit a non-tail call to METHOD using its own
 *   signature, with no IMT or rgctx arguments.
 */
2935 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2937 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG and
 *   add it to the current bblock.
 *   NOTE(review): the line storing FUNC into the call instruction is elided
 *   in this sampled view.
 */
2941 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2948 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2951 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2953 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for FUNC, going through its
 *   exception-checking wrapper.
 */
2957 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2959 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2963 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2967 * mono_emit_abs_call:
2969 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2971 inline static MonoInst*
2972 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2973 MonoMethodSignature *sig, MonoInst **args)
2975 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2979 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* ...recognize it via the abs_patches hash and resolve it at patch time */
2982 if (cfg->abs_patches == NULL)
2983 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2984 g_hash_table_insert (cfg->abs_patches, ji, ji);
2985 ins = mono_emit_native_call (cfg, ji, sig, args);
/* mark that 'fptr' is really a patch descriptor, not a code address */
2986 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Duplicate SIG, appending one extra native-pointer parameter used to pass
 *   the rgctx/extra argument on calli paths. The result is heap-allocated
 *   with g_malloc and (per the FIXME) never freed.
 */
2990 static MonoMethodSignature*
2991 sig_to_rgctx_sig (MonoMethodSignature *sig)
2993 // FIXME: memory allocation
2994 MonoMethodSignature *res;
2997 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2998 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2999 res->param_count = sig->param_count + 1;
3000 for (i = 0; i < sig->param_count; ++i)
3001 res->params [i] = sig->params [i];
/* the trailing parameter is a native int carrying the extra argument */
3002 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3006 /* Make an indirect call to FSIG passing an additional argument */
/* ORIG_ARGS holds the original arguments (including 'this' when hasthis);
 * ARG_REG carries the extra value appended as the last parameter;
 * CALL_TARGET is the address to call. */
3008 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3010 MonoMethodSignature *csig;
3011 MonoInst *args_buf [16];
3013 int i, pindex, tmp_reg;
3015 /* Make a call with an rgctx/extra arg */
/* use the stack buffer when it fits, otherwise allocate from the mempool */
3016 if (fsig->param_count + 2 < 16)
3019 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3022 args [pindex ++] = orig_args [0];
3023 for (i = 0; i < fsig->param_count; ++i)
3024 args [pindex ++] = orig_args [fsig->hasthis + i];
3025 tmp_reg = alloc_preg (cfg);
3026 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* csig = fsig plus one trailing native-int parameter */
3027 csig = sig_to_rgctx_sig (fsig);
3028 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3031 /* Emit an indirect call to the function descriptor ADDR */
/* llvm-only mode: function pointers are <addr, arg> descriptor pairs. */
3033 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3035 int addr_reg, arg_reg;
3036 MonoInst *call_target;
3038 g_assert (cfg->llvm_only);
3041 * addr points to a <addr, arg> pair, load both of them, and
3042 * make a call to addr, passing arg as an extra arg.
3044 addr_reg = alloc_preg (cfg);
3045 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3046 arg_reg = alloc_preg (cfg);
/* the extra argument lives one pointer past the code address */
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3049 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether the JIT may call C icalls directly, bypassing their managed
 *   wrappers. Disabled under (non-llvm-only) LLVM, when emitting sdb
 *   sequence points, or when explicitly disabled on the cfg.
 */
3053 direct_icalls_enabled (MonoCompile *cfg)
3055 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3057 if (cfg->compile_llvm && !cfg->llvm_only)
3060 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 *   and direct icalls are enabled, its (lazily created) wrapper is inlined
 *   instead of called; otherwise fall back to a normal wrapped native call.
 */
3066 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3069 * Call the jit icall without a wrapper if possible.
3070 * The wrapper is needed for the following reasons:
3071 * - to handle exceptions thrown using mono_raise_exceptions () from the
3072 * icall function. The EH code needs the lmf frame pushed by the
3073 * wrapper to be able to unwind back to managed code.
3074 * - to be able to do stack walks for asynchronously suspended
3075 * threads when debugging.
3077 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* create the wrapper on first use; the barrier publishes it safely */
3081 if (!info->wrapper_method) {
3082 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3083 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3085 mono_memory_barrier ();
3089 * Inline the wrapper method, which is basically a call to the C icall, and
3090 * an exception check.
3092 costs = inline_method (cfg, info->wrapper_method, NULL,
3093 args, NULL, il_offset, TRUE);
3094 g_assert (costs > 0);
3095 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3099 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or LLVM) calls,
 *   since native code might leave the upper bits of sub-register-sized
 *   results uninitialized. Returns the (possibly widened) result.
 */
3104 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3106 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3107 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3111 * Native code might return non register sized integers
3112 * without initializing the upper bits.
3114 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3115 case OP_LOADI1_MEMBASE:
3116 widen_op = OP_ICONV_TO_I1;
3118 case OP_LOADU1_MEMBASE:
3119 widen_op = OP_ICONV_TO_U1;
3121 case OP_LOADI2_MEMBASE:
3122 widen_op = OP_ICONV_TO_I2;
3124 case OP_LOADU2_MEMBASE:
3125 widen_op = OP_ICONV_TO_U2;
3131 if (widen_op != -1) {
3132 int dreg = alloc_preg (cfg);
3135 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* keep the original eval-stack type on the widened value */
3136 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *   Emit IR that throws a MethodAccessException at runtime, identifying the
 *   caller METHOD and the inaccessible CIL_METHOD.
 */
3147 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3149 MonoInst *args [16];
3151 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
3152 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
3154 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *   Return the corlib-internal String.memcpy helper, cached in a static.
 *   Aborts if a corlib without the helper is installed.
 */
3158 get_memcpy_method (void)
3160 static MonoMethod *memcpy_method = NULL;
3161 if (!memcpy_method) {
3162 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3164 g_error ("Old corlib found. Install a new one");
3166 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively compute a bitmap (one bit per pointer-sized slot, relative
 *   to OFFSET) of the reference fields of KLASS, for use by the GC write
 *   barrier. Callers keep the total size <= 32 slots so the shifts stay in
 *   range (see mono_emit_wb_aware_memcpy).
 */
3170 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3172 MonoClassField *field;
3173 gpointer iter = NULL;
3175 while ((field = mono_class_get_fields (klass, &iter))) {
/* skip static fields; only instance layout matters here */
3178 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) MonoObject header */
3180 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3181 if (mini_type_is_reference (mono_field_get_type (field))) {
3182 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3183 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3185 MonoClass *field_class = mono_class_from_mono_type (field->type);
3186 if (field_class->has_references)
3187 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 *   order: the backend's OP_CARD_TABLE_WBARRIER, inline card-table marking,
 *   and finally a call to the GC's write-barrier method. No-op when write
 *   barriers are disabled.
 */
3193 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3195 int card_table_shift_bits;
3196 gpointer card_table_mask;
3198 MonoInst *dummy_use;
3199 int nursery_shift_bits;
3200 size_t nursery_size;
3202 if (!cfg->gen_write_barriers)
3205 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3207 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3209 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3212 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3213 wbarrier->sreg1 = ptr->dreg;
3214 wbarrier->sreg2 = value->dreg;
3215 MONO_ADD_INS (cfg->cbb, wbarrier);
3216 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3217 int offset_reg = alloc_preg (cfg);
/* compute the card index: (ptr >> shift) & mask */
3221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3222 if (card_table_mask)
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3225 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3226 * IMM's larger than 32bits.
3228 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3229 card_reg = ins->dreg;
3231 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* mark the card dirty with a one-byte store */
3232 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3234 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3235 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3238 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 *   from iargs[1] to iargs[0]. Falls back (returning failure) when alignment
 *   or size make the bitmap approach unusable; large copies are routed to
 *   the mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 *   NOTE(review): the explicit return statements are elided in this sampled
 *   view — presumably returns a gboolean success flag; confirm.
 */
3242 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3244 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3245 unsigned need_wb = 0;
3250 /*types with references can't have alignment smaller than sizeof(void*) */
3251 if (align < SIZEOF_VOID_P)
3254 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3255 if (size > 32 * SIZEOF_VOID_P)
3258 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3260 /* We don't unroll more than 5 stores to avoid code bloat. */
3261 if (size > 5 * SIZEOF_VOID_P) {
3262 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3263 size += (SIZEOF_VOID_P - 1);
3264 size &= ~(SIZEOF_VOID_P - 1);
3266 EMIT_NEW_ICONST (cfg, iargs [2], size);
3267 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3268 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3272 destreg = iargs [0]->dreg;
3273 srcreg = iargs [1]->dreg;
3276 dest_ptr_reg = alloc_preg (cfg);
3277 tmp_reg = alloc_preg (cfg);
3280 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* pointer-sized copy loop; barrier emitted per-slot where the bitmap says so */
3282 while (size >= SIZEOF_VOID_P) {
3283 MonoInst *load_inst;
3284 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3285 load_inst->dreg = tmp_reg;
3286 load_inst->inst_basereg = srcreg;
3287 load_inst->inst_offset = offset;
3288 MONO_ADD_INS (cfg->cbb, load_inst);
3290 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3293 emit_write_barrier (cfg, iargs [0], load_inst);
3295 offset += SIZEOF_VOID_P;
3296 size -= SIZEOF_VOID_P;
3299 /*tmp += sizeof (void*)*/
3300 if (size >= SIZEOF_VOID_P) {
3301 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3302 MONO_ADD_INS (cfg->cbb, iargs [0]);
3306 /* Those cannot be references since size < sizeof (void*) */
/* copy the sub-pointer-sized tail in 4-, 2- and 1-byte chunks */
3308 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3315 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3322 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3323 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3332 * Emit code to copy a valuetype of type @klass whose address is stored in
3333 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) layout/size; under gsharedvt the
 * size and memcpy helper are fetched from the rgctx at runtime. */
3336 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3338 MonoInst *iargs [4];
3341 MonoMethod *memcpy_method;
3342 MonoInst *size_ins = NULL;
3343 MonoInst *memcpy_ins = NULL;
3347 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3350 * This check breaks with spilled vars... need to handle it during verification anyway.
3351 * g_assert (klass && klass == src->klass && klass == dest->klass);
3354 if (mini_is_gsharedvt_klass (klass)) {
3356 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3357 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3361 n = mono_class_native_size (klass, &align);
3363 n = mono_class_value_size (klass, &align);
3365 /* if native is true there should be no references in the struct */
3366 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3367 /* Avoid barriers when storing to the stack */
3368 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3369 (dest->opcode == OP_LDADDR))) {
3375 context_used = mini_class_check_context_used (cfg, klass);
3377 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3378 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3380 } else if (context_used) {
3381 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3383 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3384 if (!cfg->compile_aot)
3385 mono_class_compute_gc_descriptor (klass);
/* barrier-aware copy helpers: gsharedvt variant vs. plain value copy */
3389 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3391 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path */
3396 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3397 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3398 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3403 iargs [2] = size_ins;
3405 EMIT_NEW_ICONST (cfg, iargs [2], n);
3407 memcpy_method = get_memcpy_method ();
/* gsharedvt: indirect call through the rgctx-provided memcpy helper */
3409 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3411 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the corlib-internal String.memset helper, cached in a static.
 *   Aborts if a corlib without the helper is installed.
 */
3416 get_memset_method (void)
3418 static MonoMethod *memset_method = NULL;
3419 if (!memset_method) {
3420 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3422 g_error ("Old corlib found. Install a new one");
3424 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of type KLASS at the address in
 *   DEST. Under gsharedvt the size and a bzero helper come from the rgctx;
 *   small known sizes are inlined via mini_emit_memset, larger ones call
 *   the corlib memset helper.
 */
3428 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3430 MonoInst *iargs [3];
3433 MonoMethod *memset_method;
3434 MonoInst *size_ins = NULL;
3435 MonoInst *bzero_ins = NULL;
3436 static MonoMethod *bzero_method;
3438 /* FIXME: Optimize this for the case when dest is an LDADDR */
3439 mono_class_init (klass);
3440 if (mini_is_gsharedvt_klass (klass)) {
3441 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3442 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3444 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3445 g_assert (bzero_method);
3447 iargs [1] = size_ins;
/* indirect call through the rgctx-provided bzero helper */
3448 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3452 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3454 n = mono_class_value_size (klass, &align);
/* small sizes: inline memset; otherwise call the corlib helper */
3456 if (n <= sizeof (gpointer) * 8) {
3457 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3460 memset_method = get_memset_method ();
3462 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3463 EMIT_NEW_ICONST (cfg, iargs [2], n);
3464 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3471 * Emit IR to return either the this pointer for instance method,
3472 * or the mrgctx for static methods.
/* CONTEXT_USED (MONO_GENERIC_CONTEXT_USED_* flags) selects which context
 * carrier is needed; requires a gshared cfg. */
3475 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3477 MonoInst *this_ins = NULL;
3479 g_assert (cfg->gshared);
/* instance methods of reference types can derive the context from 'this' */
3481 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3482 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3483 !method->klass->valuetype)
3484 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3486 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3487 MonoInst *mrgctx_loc, *mrgctx_var;
3489 g_assert (!this_ins);
3490 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* the mrgctx was stashed in the vtable var by the prolog */
3492 mrgctx_loc = mono_get_vtable_var (cfg);
3493 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3496 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3497 MonoInst *vtable_loc, *vtable_var;
3499 g_assert (!this_ins);
3501 vtable_loc = mono_get_vtable_var (cfg);
3502 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3504 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3505 MonoInst *mrgctx_var = vtable_var;
/* the var actually holds an mrgctx; load its class_vtable field */
3508 vtable_reg = alloc_preg (cfg);
3509 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3510 vtable_var->type = STACK_PTR;
/* default: load the vtable out of the 'this' object */
3518 vtable_reg = alloc_preg (cfg);
3519 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) an rgctx-entry patch descriptor binding
 *   METHOD, the mrgctx/vtable flag, the wrapped patch (PATCH_TYPE/PATCH_DATA)
 *   and the rgctx info type. Pool-owned; do not free.
 */
3524 static MonoJumpInfoRgctxEntry *
3525 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3527 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3528 res->method = method;
3529 res->in_mrgctx = in_mrgctx;
3530 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3531 res->data->type = patch_type;
3532 res->data->data.target = patch_data;
3533 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the value described by ENTRY from the runtime
 * generic context RGCTX, instead of going through the rgctx-fetch trampoline.
 * One path (visible below) has no fastpath and simply calls the
 * mono_fill_method_rgctx/mono_fill_class_rgctx icalls with the slot index;
 * the other emits an explicit fastpath that walks the rgctx slot arrays and
 * falls back to the icall via IS_NULL_BB when a slot is not yet initialized.
 * NOTE(review): branch structure between the two paths is partially elided in
 * this view — presumably selected by cfg->llvm_only or similar; confirm upstream.
 */
3538 static inline MonoInst*
3539 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3541 MonoInst *args [16];
3544 // FIXME: No fastpath since the slot is not a compile time constant
3546 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
/* Pick the method- or class-level fill icall depending on which context the entry lives in */
3547 if (entry->in_mrgctx)
3548 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3550 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3554 * FIXME: This can be called during decompose, which is a problem since it creates
3556 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3558 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3560 MonoBasicBlock *is_null_bb, *end_bb;
3561 MonoInst *res, *ins, *call;
/* Resolve the compile-time slot for ENTRY and split it into mrgctx flag + index */
3564 slot = mini_get_rgctx_entry_slot (entry);
3566 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3567 index = MONO_RGCTX_SLOT_INDEX (slot);
/* For an mrgctx the first array is embedded after the MonoMethodRuntimeGenericContext header */
3569 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops (depth) are needed to reach the slot; each level's
 * last element is a pointer to the next array, hence the `size - 1` bound. */
3570 for (depth = 0; ; ++depth) {
3571 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3573 if (index < size - 1)
3578 NEW_BBLOCK (cfg, end_bb);
3579 NEW_BBLOCK (cfg, is_null_bb);
3582 rgctx_reg = rgctx->dreg;
3584 rgctx_reg = alloc_preg (cfg);
/* Load the rgctx pointer out of the vtable; it may still be NULL */
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3587 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3588 NEW_BBLOCK (cfg, is_null_bb);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of slot arrays, bailing to the slowpath on any NULL link */
3594 for (i = 0; i < depth; ++i) {
3595 int array_reg = alloc_preg (cfg);
3597 /* load ptr to next array */
3598 if (mrgctx && i == 0)
3599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3602 rgctx_reg = array_reg;
3603 /* is the ptr null? */
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3605 /* if yes, jump to actual trampoline */
3606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath hit: load the slot value itself (index + 1 skips the next-array link) */
3610 val_reg = alloc_preg (cfg);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3612 /* is the slot null? */
3613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3614 /* if yes, jump to actual trampoline */
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Move the fetched value into a dedicated result reg shared with the slowpath */
3618 res_reg = alloc_preg (cfg);
3619 MONO_INST_NEW (cfg, ins, OP_MOVE);
3620 ins->dreg = res_reg;
3621 ins->sreg1 = val_reg;
3622 MONO_ADD_INS (cfg->cbb, ins);
3624 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: slot missing/uninitialized, fill it via the runtime icall */
3627 MONO_START_BB (cfg, is_null_bb);
3629 EMIT_NEW_ICONST (cfg, args [1], index);
3631 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3633 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
/* Funnel the icall result into the same res_reg so END_BB sees one value */
3634 MONO_INST_NEW (cfg, ins, OP_MOVE);
3635 ins->dreg = res_reg;
3636 ins->sreg1 = call->dreg;
3637 MONO_ADD_INS (cfg->cbb, ins);
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3640 MONO_START_BB (cfg, end_bb);
3649 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Dispatcher: either expand the fetch inline (see emit_rgctx_fetch_inline) or
 * emit an abs call to the lazy-fetch trampoline which resolves the slot at
 * runtime. The condition choosing between the two is elided in this view —
 * presumably cfg->llvm_only; confirm upstream.
 */
3652 static inline MonoInst*
3653 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3656 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3658 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE information (e.g. vtable, cast cache) for
 * KLASS from the runtime generic context of the current method.
 */
3662 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3663 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3665 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3666 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3668 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for signature SIG from the
 * runtime generic context of the current method.
 */
3672 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3673 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3675 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3676 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3678 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for a gsharedvt call described
 * by (SIG, CMETHOD). The call descriptor is mempool-allocated and handed to
 * the generic rgctx-entry machinery as a MONO_PATCH_INFO_GSHAREDVT_CALL patch.
 */
3682 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3683 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3685 MonoJumpInfoGSharedVtCall *call_info;
3686 MonoJumpInfoRgctxEntry *entry;
3689 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3690 call_info->sig = sig;
3691 call_info->method = cmethod;
3693 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3694 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3696 return emit_rgctx_fetch (cfg, rgctx, entry);
3700 * emit_get_rgctx_virt_method:
3702 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Builds a (KLASS, VIRT_METHOD) descriptor and fetches the requested
 * RGCTX_TYPE info through the rgctx as a MONO_PATCH_INFO_VIRT_METHOD patch.
 */
3705 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3706 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3708 MonoJumpInfoVirtMethod *info;
3709 MonoJumpInfoRgctxEntry *entry;
3712 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3713 info->klass = klass;
3714 info->method = virt_method;
3716 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3717 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt info structure (INFO) for CMETHOD from the
 * rgctx; the info_type is fixed to MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO.
 */
3723 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3724 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3726 MonoJumpInfoRgctxEntry *entry;
3729 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3730 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3732 return emit_rgctx_fetch (cfg, rgctx, entry);
3736 * emit_get_rgctx_method:
3738 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3739 * normal constants, else emit a load from the rgctx.
3742 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3743 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is fully known at JIT time, so a plain constant suffices */
3745 if (!context_used) {
3748 switch (rgctx_type) {
3749 case MONO_RGCTX_INFO_METHOD:
3750 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3752 case MONO_RGCTX_INFO_METHOD_RGCTX:
3753 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two info kinds above are supported for the constant path */
3756 g_assert_not_reached ();
/* Shared case: fetch through the rgctx like the other emit_get_rgctx_* helpers */
3759 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3760 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3762 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
3767 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3768 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3770 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3771 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3773 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the current method's
 * gsharedvt info template table, reusing an existing matching entry when
 * possible and appending a new one otherwise. The entry array grows by
 * doubling (starting at 16) out of the cfg mempool.
 */
3777 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3779 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3780 MonoRuntimeGenericContextInfoTemplate *template_;
/* Deduplicate: LOCAL_OFFSET entries are deliberately never shared */
3785 for (i = 0; i < info->num_entries; ++i) {
3786 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3788 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full; old storage is mempool memory and is simply abandoned */
3792 if (info->num_entries == info->count_entries) {
3793 MonoRuntimeGenericContextInfoTemplate *new_entries;
3794 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3796 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3798 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3799 info->entries = new_entries;
3800 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index */
3803 idx = info->num_entries;
3804 template_ = &info->entries [idx];
3805 template_->info_type = rgctx_type;
3806 template_->data = data;
3808 info->num_entries ++;
3814 * emit_get_gsharedvt_info:
3816 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3819 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (DATA, RGCTX_TYPE) in the per-method info table */
3824 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3825 /* Load info->entries [idx] */
3826 dreg = alloc_preg (cfg);
3827 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type */
3833 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3835 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3839 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer (.cctor) of KLASS if it has not
 * run yet. The vtable argument is either fetched from the rgctx (shared code)
 * or emitted as a constant. When the backend supports it, a single
 * OP_GENERIC_CLASS_INIT opcode is used; otherwise an inline initialized-bit
 * check guards a call to the mono_generic_class_init icall.
 */
3842 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3844 MonoInst *vtable_arg;
3847 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable is only known at runtime, fetch it via the rgctx */
3850 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3851 klass, MONO_RGCTX_INFO_VTABLE);
3853 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3857 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3860 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3864 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3865 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3867 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3868 ins->sreg1 = vtable_arg->dreg;
3869 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test the 'initialized' bit in the vtable by hand */
3871 static int byte_offset = -1;
3872 static guint8 bitmask;
3873 int bits_reg, inited_reg;
3874 MonoBasicBlock *inited_bb;
3875 MonoInst *args [16];
/* Lazily locate the bitfield once; cached in function-local statics */
3877 if (byte_offset < 0)
3878 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3880 bits_reg = alloc_ireg (cfg);
3881 inited_reg = alloc_ireg (cfg);
3883 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3886 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall */
3888 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3889 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3891 args [0] = vtable_arg;
3892 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3894 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger/stack-trace marker) for the IL offset IP,
 * but only when seq points are enabled and METHOD is the method actually
 * being compiled (not an inlined callee). NONEMPTY_STACK flags seq points
 * taken while the evaluation stack is not empty.
 */
3899 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3903 if (cfg->gen_seq_points && cfg->method == method) {
3904 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3906 ins->flags |= MONO_INST_NONEMPTY_STACK;
3907 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from OBJ_REG's vtable) and the target KLASS into the JIT TLS area
 * (class_cast_from/class_cast_to), so a later InvalidCastException can report
 * both types. NULL_CHECK skips the bookkeeping for null objects. No-op when
 * better_cast_details is off.
 */
3912 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3914 if (mini_get_debug_options ()->better_cast_details) {
3915 int vtable_reg = alloc_preg (cfg);
3916 int klass_reg = alloc_preg (cfg);
3917 MonoBasicBlock *is_null_bb = NULL;
3919 int to_klass_reg, context_used;
/* Optional null guard: jump straight past the recording for null OBJ_REG */
3922 NEW_BBLOCK (cfg, is_null_bb);
3924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3925 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3928 tls_get = mono_get_jit_tls_intrinsic (cfg);
3930 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3934 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the object's dynamic class as the cast source */
3935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class; in shared code it must come from the rgctx */
3940 context_used = mini_class_check_context_used (cfg, klass);
3942 MonoInst *class_ins;
3944 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3945 to_klass_reg = class_ins->dreg;
3947 to_klass_reg = alloc_preg (cfg);
3948 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3953 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast information
 * in JIT TLS once the cast has succeeded. No-op when better_cast_details is off.
 */
3958 reset_cast_details (MonoCompile *cfg)
3960 /* Reset the variables holding the cast details */
3961 if (mini_get_debug_options ()->better_cast_details) {
3962 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3964 MONO_ADD_INS (cfg->cbb, tls_get);
3965 /* It is enough to reset the from field */
3966 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3971 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that verifies the runtime type of array object OBJ is exactly
 * ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise (used for stelem
 * covariance checks). The comparison strategy depends on the compilation mode:
 * class pointer (shared runtime), rgctx-fetched vtable (shared generic code),
 * AOT vtable constant, or an immediate vtable pointer (plain JIT).
 */
3974 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3976 int vtable_reg = alloc_preg (cfg);
3979 context_used = mini_class_check_context_used (cfg, array_class);
3981 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: doubles as the null check on OBJ */
3983 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3985 if (cfg->opt & MONO_OPT_SHARED) {
3986 int class_reg = alloc_preg (cfg);
/* Shared runtime: compare MonoClass pointers instead of per-domain vtables */
3989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3990 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3991 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3992 } else if (context_used) {
3993 MonoInst *vtable_ins;
3995 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3996 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3998 if (cfg->compile_aot) {
/* Vtable creation can fail on a broken class; bail out to the caller */
4002 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4004 vt_reg = alloc_preg (cfg);
4005 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4006 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4009 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4015 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4017 reset_cast_details (cfg);
4021 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4022 * generic code is generated.
/*
 * Calls Nullable<T>.Unbox on VAL. In shared code the Unbox method's address is
 * fetched from the rgctx and invoked indirectly; otherwise a direct call is
 * emitted, passing the vtable as the rgctx argument when the method requires it.
 */
4025 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4027 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4030 MonoInst *rgctx, *addr;
4032 /* FIXME: What if the class is shared? We might not
4033 have to get the address of the method from the
4035 addr = emit_get_rgctx_method (cfg, context_used, method,
4036 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
/* llvm-only gsharedvt uses its own indirect-call helper */
4037 if (cfg->llvm_only && cfg->gsharedvt) {
4038 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4040 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4042 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable as extra arg */
4045 gboolean pass_vtable, pass_mrgctx;
4046 MonoInst *rgctx_arg = NULL;
4048 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4049 g_assert (!pass_mrgctx);
4052 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4055 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4058 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to a value of
 * type KLASS: type-check the boxed object (throwing InvalidCastException on
 * mismatch) and return the address of the payload right after the MonoObject
 * header. Both a shared (rgctx-based element-class compare) and a non-shared
 * (direct class check) variant are emitted depending on CONTEXT_USED.
 */
4063 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4067 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4068 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4069 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4070 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4072 obj_reg = sp [0]->dreg;
/* Faulting load = implicit null check */
4073 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4076 /* FIXME: generics */
4077 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type */
4080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4081 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared path: compare element classes using an rgctx-fetched class pointer */
4087 MonoInst *element_class;
4089 /* This assertion is from the unboxcast insn */
4090 g_assert (klass->rank == 0);
4092 element_class = emit_get_rgctx_klass (cfg, context_used,
4093 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4095 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4096 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast-details bookkeeping */
4098 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4099 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4100 reset_cast_details (cfg);
/* Result: pointer to the unboxed payload, i.e. obj + sizeof (MonoObject) */
4103 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4104 MONO_ADD_INS (cfg->cbb, add);
4105 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete representation
 * (value type, reference type or Nullable<T>) is only known at runtime.
 * Emits a three-way branch on MONO_RGCTX_INFO_CLASS_BOX_TYPE:
 *   - vtype:    the result address is obj + sizeof (MonoObject);
 *   - ref:      the reference is spilled to a temporary and its address used;
 *   - nullable: Nullable<T>.Unbox is called indirectly via the rgctx.
 * All paths converge in END_BB and the value is loaded from ADDR_REG.
 */
4112 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4114 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4115 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4119 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic castclass+unbox check through the runtime helper */
4125 args [1] = klass_inst;
4128 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4130 NEW_BBLOCK (cfg, is_ref_bb);
4131 NEW_BBLOCK (cfg, is_nullable_bb);
4132 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of KLASS */
4133 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4140 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4141 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype path: payload starts right after the object header */
4145 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4146 MONO_ADD_INS (cfg->cbb, addr);
4148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4151 MONO_START_BB (cfg, is_ref_bb);
4153 /* Save the ref to a temporary */
4154 dreg = alloc_ireg (cfg);
4155 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4156 addr->dreg = addr_reg;
4157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4161 MONO_START_BB (cfg, is_nullable_bb);
/* nullable path: call Nullable<T>.Unbox through a hand-built 1-arg signature,
 * since the concrete method cannot be constructed at JIT time */
4164 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4165 MonoInst *unbox_call;
4166 MonoMethodSignature *unbox_sig;
4168 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4169 unbox_sig->ret = &klass->byval_arg;
4170 unbox_sig->param_count = 1;
4171 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4174 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4176 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4178 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4179 addr->dreg = addr_reg;
4182 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4185 MONO_START_BB (cfg, end_bb);
/* Load the final value from the converged address */
4188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4194 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates a new object of type KLASS and returns it.
 * FOR_BOX indicates the allocation is for a box operation. Depending on the
 * mode, the allocation goes through: a managed (inlined GC) allocator, the
 * shared-runtime ves_icall_object_new, a corlib-token helper (AOT out-of-line
 * throw paths), or the vtable-specific allocation function.
 */
4197 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4199 MonoInst *iargs [2];
/* ---- shared generic code path (context_used) ---- */
4204 MonoRgctxInfoType rgctx_info;
4205 MonoInst *iargs [2];
/* gsharedvt classes have an unknown instance size at JIT time */
4206 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4208 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4210 if (cfg->opt & MONO_OPT_SHARED)
4211 rgctx_info = MONO_RGCTX_INFO_KLASS;
4213 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4214 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4216 if (cfg->opt & MONO_OPT_SHARED) {
4217 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4219 alloc_ftn = ves_icall_object_new;
4222 alloc_ftn = ves_icall_object_new_specific;
4225 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4226 if (known_instance_size) {
4227 int size = mono_class_instance_size (klass);
/* Sanity: every object must at least hold a MonoObject header */
4228 if (size < sizeof (MonoObject))
4229 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4231 EMIT_NEW_ICONST (cfg, iargs [1], size);
4233 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4236 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* ---- non-shared paths ---- */
4239 if (cfg->opt & MONO_OPT_SHARED) {
4240 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4241 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4243 alloc_ftn = ves_icall_object_new;
4244 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4245 /* This happens often in argument checking code, eg. throw new FooException... */
4246 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4247 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4248 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4250 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4251 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed -> report a type-load error through the cfg */
4255 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4256 cfg->exception_ptr = klass;
4260 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4262 if (managed_alloc) {
4263 int size = mono_class_instance_size (klass);
4264 if (size < sizeof (MonoObject))
4265 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4267 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4268 EMIT_NEW_ICONST (cfg, iargs [1], size);
4269 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4271 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the size in pointer-sized words ("lw") as first arg */
4273 guint32 lw = vtable->klass->instance_size;
4274 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4275 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4276 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4279 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4283 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4287 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of type KLASS and returns the resulting object.
 * Three main strategies:
 *   - Nullable<T>: call Nullable<T>.Box (via rgctx in shared code);
 *   - gsharedvt KLASS: runtime three-way branch on the box type (vtype /
 *     reference / nullable), mirroring handle_unbox_gsharedvt;
 *   - plain value type: allocate (handle_alloc) and store VAL after the header.
 */
4290 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4292 MonoInst *alloc, *ins;
4294 if (mono_class_is_nullable (klass)) {
4295 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4298 if (cfg->llvm_only && cfg->gsharedvt) {
4299 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4300 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4301 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4303 /* FIXME: What if the class is shared? We might not
4304 have to get the method address from the RGCTX. */
4305 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4306 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4307 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4309 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, passing the vtable when required */
4312 gboolean pass_vtable, pass_mrgctx;
4313 MonoInst *rgctx_arg = NULL;
4315 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4316 g_assert (!pass_mrgctx);
4319 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4322 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4325 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4329 if (mini_is_gsharedvt_klass (klass)) {
4330 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4331 MonoInst *res, *is_ref, *src_var, *addr;
4334 dreg = alloc_ireg (cfg);
4336 NEW_BBLOCK (cfg, is_ref_bb);
4337 NEW_BBLOCK (cfg, is_nullable_bb);
4338 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type of KLASS */
4339 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4344 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype path: allocate and copy the value after the object header */
4347 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4350 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4351 ins->opcode = OP_STOREV_MEMBASE;
4353 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4354 res->type = STACK_OBJ;
4356 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4359 MONO_START_BB (cfg, is_ref_bb);
4361 /* val is a vtype, so has to load the value manually */
4362 src_var = get_vreg_to_inst (cfg, val->dreg);
4364 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4365 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4370 MONO_START_BB (cfg, is_nullable_bb);
/* nullable path: call Nullable<T>.Box through a hand-built 1-arg signature,
 * since the concrete method cannot be constructed at JIT time */
4373 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4374 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4376 MonoMethodSignature *box_sig;
4379 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4380 * construct that method at JIT time, so have to do things by hand.
4382 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4383 box_sig->ret = &mono_defaults.object_class->byval_arg;
4384 box_sig->param_count = 1;
4385 box_sig->params [0] = &klass->byval_arg;
4388 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4390 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4391 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4392 res->type = STACK_OBJ;
4396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4398 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt) value type: allocate, then store the value in place */
4402 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4406 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one co-/contra-variant type argument instantiated
 * with a reference type. Such casts need the slower cache-based isinst /
 * castclass helpers because variance affects assignability.
 */
4412 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4415 MonoGenericContainer *container;
4416 MonoGenericInst *ginst;
4418 if (klass->generic_class) {
4419 container = klass->generic_class->container_class->generic_container;
4420 ginst = klass->generic_class->context.class_inst;
4421 } else if (klass->generic_container && context_used) {
4422 container = klass->generic_container;
4423 ginst = container->context.class_inst;
4428 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters: only (co|contra)variant ones matter here */
4430 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4432 type = ginst->type_argv [i];
4433 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls may be called directly */
4439 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD can be invoked with a direct call instead
 * of going through a wrapper. Only icalls known not to (indirectly) call
 * mono_raise_exception () are eligible; a small per-class whitelist plus
 * System.Math is used. Requires direct icalls to be enabled for CFG.
 */
4442 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4444 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4445 if (!direct_icalls_enabled (cfg))
4449 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4450 * Whitelist a few icalls for now.
4452 if (!direct_icall_type_hash) {
4453 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4455 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4456 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4457 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4458 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible */
4459 mono_memory_barrier ();
4460 direct_icall_type_hash = h;
4463 if (cmethod->klass == mono_defaults.math_class)
4465 /* No locking needed */
4466 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided with a simple
 * vtable/class compare (interfaces, arrays, nullables, MBR proxies, sealed
 * classes, open type variables) and needs the cache-based helper instead. */
4471 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the cached isinst wrapper; ARGS = { obj, klass, cache } */
4474 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4476 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4477 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the cached castclass wrapper; ARGS = { obj, klass, cache }.
 * Cast details are recorded around the call so cast failures can report types.
 */
4481 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4483 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4486 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4487 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4488 reset_cast_details (cfg);
/* Return a call-site-unique id for a CASTCLASS_CACHE patch: method index in
 * the high 16 bits, a per-cfg counter in the low 16. */
4494 get_castclass_cache_idx (MonoCompile *cfg)
4496 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4497 cfg->castclass_cache_index ++;
4498 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_isinst_with_cache_nonshared:
 *
 *   Non-shared-code variant: build the { obj, klass-const, cache-const }
 * argument triple and delegate to emit_isinst_with_cache ().
 */
4503 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4508 args [0] = obj; /* obj */
4509 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4511 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4512 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4514 return emit_isinst_with_cache (cfg, klass, args);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant: build the { obj, klass-const, cache-const }
 * argument triple and delegate to emit_castclass_with_cache ().
 */
4518 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4527 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4530 idx = get_castclass_cache_idx (cfg);
4531 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4533 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4534 return emit_castclass_with_cache (cfg, klass, args);
4538 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: check that SRC is an instance of KLASS,
 * throwing InvalidCastException otherwise, and return the (unchanged) object.
 * Complex casts (variance, interfaces, arrays, etc.) go through the cached
 * castclass wrapper; simple casts get an inline null-check + vtable/class
 * comparison. A known-null constant SRC passes trivially.
 */
4541 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4543 MonoBasicBlock *is_null_bb;
4544 int obj_reg = src->dreg;
4545 int vtable_reg = alloc_preg (cfg);
4546 MonoInst *klass_inst = NULL;
/* Casting a compile-time null always succeeds */
4548 if (src->opcode == OP_PCONST && src->inst_p0 == 0)
/* Complex case: delegate to the cache-based wrapper */
4554 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4555 MonoInst *cache_ins;
4557 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4562 /* klass - it's the second element of the cache entry*/
4563 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4566 args [2] = cache_ins;
4568 return emit_castclass_with_cache (cfg, klass, args);
/* Shared code needs the runtime class pointer for the inline check */
4571 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Null objects pass the cast without any type check */
4574 NEW_BBLOCK (cfg, is_null_bb);
4576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4579 save_cast_details (cfg, klass, obj_reg, FALSE);
4581 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4583 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4585 int klass_reg = alloc_preg (cfg);
4587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit an exact-match fast check */
4589 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4590 /* the remoting code is broken, access the class for now */
4591 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4592 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4594 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4595 cfg->exception_ptr = klass;
4598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4603 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy in mini_emit_castclass_inst */
4605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4606 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4610 MONO_START_BB (cfg, is_null_bb);
4612 reset_cast_details (cfg);
4618 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' test of SRC against KLASS.
 * res_reg is pre-loaded with the object reference and is zeroed in
 * false_bb when the test fails, so the result is SRC-or-NULL.
 */
4621 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4624 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4625 int obj_reg = src->dreg;
4626 int vtable_reg = alloc_preg (cfg);
4627 int res_reg = alloc_ireg_ref (cfg);
4628 MonoInst *klass_inst = NULL;
/* Classes with reference-variant generic arguments or otherwise complex
 * checks go through the runtime cast-cache helper instead of inline IR. */
4633 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4634 MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4636 args [0] = src; /* obj */
4638 /* klass - it's the second element of the cache entry*/
4639 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4641 args [2] = cache_ins; /* cache */
4642 return emit_isinst_with_cache (cfg, klass, args);
4645 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4648 NEW_BBLOCK (cfg, is_null_bb);
4649 NEW_BBLOCK (cfg, false_bb);
4650 NEW_BBLOCK (cfg, end_bb);
4652 /* Do the assignment at the beginning, so the other assignment can be if converted */
4653 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4654 ins->type = STACK_OBJ;
/* A null reference short-circuits straight to is_null_bb. */
4657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4658 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4662 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4663 g_assert (!context_used);
4664 /* the is_null_bb target simply copies the input register to the output */
4665 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4667 int klass_reg = alloc_preg (cfg);
/* Array case: compare the rank first, then check the element class. */
4670 int rank_reg = alloc_preg (cfg);
4671 int eclass_reg = alloc_preg (cfg);
4673 g_assert (!context_used);
4674 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element classes involving System.Object/System.Enum get dedicated
 * check sequences below. */
4679 if (klass->cast_class == mono_defaults.object_class) {
4680 int parent_reg = alloc_preg (cfg);
4681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4682 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4683 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4685 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4686 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4687 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4689 } else if (klass->cast_class == mono_defaults.enum_class) {
4690 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4692 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4693 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4695 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4696 /* Check that the object is a vector too */
4697 int bounds_reg = alloc_preg (cfg);
4698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4703 /* the is_null_bb target simply copies the input register to the output */
4704 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4706 } else if (mono_class_is_nullable (klass)) {
4707 g_assert (!context_used);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4709 /* the is_null_bb target simply copies the input register to the output */
4710 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes (non-AOT, non-shared): a direct class pointer compare
 * is sufficient, no subtype walk needed. */
4712 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4713 g_assert (!context_used);
4714 /* the remoting code is broken, access the class for now */
4715 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4716 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4718 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4719 cfg->exception_ptr = klass;
4722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4731 /* the is_null_bb target simply copies the input register to the output */
4732 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result register becomes NULL. */
4737 MONO_START_BB (cfg, false_bb);
4739 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4742 MONO_START_BB (cfg, is_null_bb);
4744 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check described in the comment
 * below; the three integer results are produced in true_bb (0), false_bb (1)
 * and false2_bb (2).  The transparent-proxy paths are compiled only when
 * DISABLE_REMOTING is not defined.
 */
4750 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4752 /* This opcode takes as input an object reference and a class, and returns:
4753 0) if the object is an instance of the class,
4754 1) if the object is not instance of the class,
4755 2) if the object is a proxy whose type cannot be determined */
4758 #ifndef DISABLE_REMOTING
4759 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4761 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4763 int obj_reg = src->dreg;
4764 int dreg = alloc_ireg (cfg);
4766 #ifndef DISABLE_REMOTING
4767 int klass_reg = alloc_preg (cfg);
4770 NEW_BBLOCK (cfg, true_bb);
4771 NEW_BBLOCK (cfg, false_bb);
4772 NEW_BBLOCK (cfg, end_bb);
4773 #ifndef DISABLE_REMOTING
4774 NEW_BBLOCK (cfg, false2_bb);
4775 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4781 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4782 #ifndef DISABLE_REMOTING
4783 NEW_BBLOCK (cfg, interface_fail_bb);
4786 tmp_reg = alloc_preg (cfg);
4787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4788 #ifndef DISABLE_REMOTING
/* Interface check failed: a transparent proxy that carries custom type
 * info means "type cannot be determined" (result 2 via false2_bb). */
4789 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4790 MONO_START_BB (cfg, interface_fail_bb);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4793 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4795 tmp_reg = alloc_preg (cfg);
4796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4800 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4803 #ifndef DISABLE_REMOTING
/* Non-interface case: load the object's class; for a transparent proxy,
 * check the remote proxy_class and its custom_type_info instead. */
4804 tmp_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4808 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4809 tmp_reg = alloc_preg (cfg);
4810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4813 tmp_reg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4818 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4821 MONO_START_BB (cfg, no_proxy_bb);
4823 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4825 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
4829 MONO_START_BB (cfg, false_bb);
4831 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4834 #ifndef DISABLE_REMOTING
4835 MONO_START_BB (cfg, false2_bb);
4837 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4841 MONO_START_BB (cfg, true_bb);
4843 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4845 MONO_START_BB (cfg, end_bb);
4848 MONO_INST_NEW (cfg, ins, OP_ICONST);
4850 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check described in the comment
 * below; result 0 is produced in ok_result_bb and result 1 (undetermined
 * proxy) in the remoting-only paths.  The transparent-proxy handling is
 * compiled only when DISABLE_REMOTING is not defined.
 */
4856 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4858 /* This opcode takes as input an object reference and a class, and returns:
4859 0) if the object is an instance of the class,
4860 1) if the object is a proxy whose type cannot be determined
4861 an InvalidCastException exception is thrown otherwhise*/
4864 #ifndef DISABLE_REMOTING
4865 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4867 MonoBasicBlock *ok_result_bb;
4869 int obj_reg = src->dreg;
4870 int dreg = alloc_ireg (cfg);
4871 int tmp_reg = alloc_preg (cfg);
4873 #ifndef DISABLE_REMOTING
4874 int klass_reg = alloc_preg (cfg);
4875 NEW_BBLOCK (cfg, end_bb);
4878 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a descriptive exception. */
4883 save_cast_details (cfg, klass, obj_reg, FALSE);
4885 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4886 #ifndef DISABLE_REMOTING
4887 NEW_BBLOCK (cfg, interface_fail_bb);
4889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy with custom type info
 * escapes with result 1; anything else throws InvalidCastException. */
4890 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4891 MONO_START_BB (cfg, interface_fail_bb);
4892 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4894 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4896 tmp_reg = alloc_preg (cfg);
4897 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4899 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4901 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4905 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4909 #ifndef DISABLE_REMOTING
/* Non-interface case: transparent proxies check the remote proxy_class and
 * custom_type_info; ordinary objects fall through to a plain castclass. */
4910 NEW_BBLOCK (cfg, no_proxy_bb);
4912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4914 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4916 tmp_reg = alloc_preg (cfg);
4917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4920 tmp_reg = alloc_preg (cfg);
4921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4925 NEW_BBLOCK (cfg, fail_1_bb);
4927 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4929 MONO_START_BB (cfg, fail_1_bb);
4931 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4934 MONO_START_BB (cfg, no_proxy_bb);
4936 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4938 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4942 MONO_START_BB (cfg, ok_result_bb);
4944 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4946 #ifndef DISABLE_REMOTING
4947 MONO_START_BB (cfg, end_bb);
4951 MONO_INST_NEW (cfg, ins, OP_ICONST);
4953 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inlined Enum.HasFlag (): load the enum value through its
 * underlying type, then compute (value & flag) == flag.
 */
4958 static G_GNUC_UNUSED MonoInst*
4959 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4961 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4962 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4965 switch (enum_type->type) {
4968 #if SIZEOF_REGISTER == 8
/* is_i4 selects between 32 bit (OP_I*) and 64 bit (OP_L*) opcodes and
 * register kinds below. */
4980 MonoInst *load, *and_, *cmp, *ceq;
4981 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4982 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4983 int dest_reg = alloc_ireg (cfg);
4985 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4986 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4987 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4988 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4990 ceq->type = STACK_I4;
/* Decompose each newly emitted opcode. */
4993 load = mono_decompose_opcode (cfg, load);
4994 and_ = mono_decompose_opcode (cfg, and_);
4995 cmp = mono_decompose_opcode (cfg, cmp);
4996 ceq = mono_decompose_opcode (cfg, ceq);
5004  * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR for a delegate constructor: allocate the delegate
 * object, store its target/method/method_code fields, then initialize
 * method_ptr/invoke_impl either through the llvm-only init icalls or
 * through a delegate trampoline.
 */
5006 static G_GNUC_UNUSED MonoInst*
5007 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5011 gpointer trampoline;
5012 MonoInst *obj, *method_ins, *tramp_ins;
5016 if (virtual_ && !cfg->llvm_only) {
5017 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5020 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5024 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5028 /* Inline the contents of mono_delegate_ctor */
5030 /* Set target field */
5031 /* Optimize away setting of NULL target */
5032 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5033 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference: emit a GC write barrier. */
5034 if (cfg->gen_write_barriers) {
5035 dreg = alloc_preg (cfg);
5036 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5037 emit_write_barrier (cfg, ptr, target);
5041 /* Set method field */
5042 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5046 * To avoid looking up the compiled code belonging to the target method
5047 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5048 * store it, and we fill it after the method has been compiled.
5050 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5051 MonoInst *code_slot_ins;
5054 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash, under the
 * domain lock. */
5056 domain = mono_domain_get ();
5057 mono_domain_lock (domain);
5058 if (!domain_jit_info (domain)->method_code_hash)
5059 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5060 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5062 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5063 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5065 mono_domain_unlock (domain);
5067 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode: delegate setup is done by an init icall instead of a
 * delegate trampoline. */
5072 if (cfg->llvm_only) {
5073 MonoInst *args [16];
5078 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5079 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5082 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: emit a patchable constant describing the (klass, method) pair;
 * JIT: create the delegate trampoline eagerly and embed its address. */
5088 if (cfg->compile_aot) {
5089 MonoDelegateClassMethodPair *del_tramp;
5091 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5092 del_tramp->klass = klass;
5093 del_tramp->method = context_used ? NULL : method;
5094 del_tramp->is_virtual = virtual_;
5095 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5098 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5100 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5101 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5104 /* Set invoke_impl field */
5106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5108 dreg = alloc_preg (cfg);
5109 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5112 dreg = alloc_preg (cfg);
5113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate in method_is_virtual (a byte
 * field, hence OP_STOREI1). */
5117 dreg = alloc_preg (cfg);
5118 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5119 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5121 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va () icall for a RANK
 * dimensional array allocation with the dimension arguments in SP.
 * Vararg calls are not supported by the LLVM backend, so LLVM is
 * disabled for this method.
 */
5127 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5129 MonoJitICallInfo *info;
5131 /* Need to register the icall so it gets an icall wrapper */
5132 info = mono_get_array_new_va_icall (rank);
5134 cfg->flags |= MONO_CFG_HAS_VARARGS;
5136 /* mono_array_new_va () needs a vararg calling convention */
5137 cfg->exception_message = g_strdup ("array-new");
5138 cfg->disable_llvm = TRUE;
5140 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5141 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5145  * handle_constrained_gsharedvt_call:
5147  * Handle constrained calls where the receiver is a gsharedvt type.
5148  * Return the instruction representing the call. Set the cfg exception on failure.
5151 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5152 gboolean *ref_emit_widen)
5154 MonoInst *ins = NULL;
5155 gboolean emit_widen = *ref_emit_widen;
5158 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5159 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5160 * pack the arguments into an array, and do the rest of the work in in an icall.
5162 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5163 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5164 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5165 MonoInst *args [16];
5168 * This case handles calls to
5169 * - object:ToString()/Equals()/GetHashCode(),
5170 * - System.IComparable<T>:CompareTo()
5171 * - System.IEquatable<T>:Equals ()
5172 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, args [2] = the constrained class; both are
 * looked up through the rgctx when the context is generic-shared. */
5176 if (mono_method_check_context_used (cmethod))
5177 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5179 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5180 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5182 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5183 if (fsig->hasthis && fsig->param_count) {
5184 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5185 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5186 ins->dreg = alloc_preg (cfg);
5187 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5188 MONO_ADD_INS (cfg->cbb, ins);
5191 if (mini_is_gsharedvt_type (fsig->params [0])) {
5192 int addr_reg, deref_arg_reg;
5194 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5195 deref_arg_reg = alloc_preg (cfg);
5196 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5197 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5199 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5200 addr_reg = ins->dreg;
5201 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5203 EMIT_NEW_ICONST (cfg, args [3], 0);
5204 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5207 EMIT_NEW_ICONST (cfg, args [3], 0);
5208 EMIT_NEW_ICONST (cfg, args [4], 0);
/* Perform the actual constrained call in the runtime helper. */
5210 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* Convert the returned object to the expected return type: unbox
 * gsharedvt returns, or load primitives/structs from after the
 * MonoObject header. */
5213 if (mini_is_gsharedvt_type (fsig->ret)) {
5214 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5215 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5219 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5220 MONO_ADD_INS (cfg->cbb, add);
5222 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5223 MONO_ADD_INS (cfg->cbb, ins);
5224 /* ins represents the call result */
5227 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5230 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the
 * entry basic block, and add a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op when there is no
 * got_var or it has already been allocated.
 */
5239 mono_emit_load_got_addr (MonoCompile *cfg)
5241 MonoInst *getaddr, *dummy_use;
5243 if (!cfg->got_var || cfg->got_var_allocated)
5246 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5247 getaddr->cil_code = cfg->header->code;
5248 getaddr->dreg = cfg->got_var->dreg;
5250 /* Add it to the start of the first bblock */
5251 if (cfg->bb_entry->code) {
5252 getaddr->next = cfg->bb_entry->code;
5253 cfg->bb_entry->code = getaddr;
5256 MONO_ADD_INS (cfg->bb_entry, getaddr);
5258 cfg->got_var_allocated = TRUE;
5261 * Add a dummy use to keep the got_var alive, since real uses might
5262 * only be generated by the back ends.
5263 * Add it to end_bblock, so the variable's lifetime covers the whole
5265 * It would be better to make the usage of the got var explicit in all
5266 * cases when the backend needs it (i.e. calls, throw etc.), so this
5267 * wouldn't be needed.
5269 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5270 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline code-size limit, read once from the MONO_INLINELIMIT environment
 * variable (falling back to INLINE_LENGTH_LIMIT) the first time
 * mono_method_check_inlining () runs. */
5273 static int inline_limit;
5274 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects noinlining/synchronized methods, MarshalByRef classes, bodies at
 * or over the inline size limit (unless AggressiveInlining), classes whose
 * cctor has not run and cannot be run here, R4 signatures under soft-float,
 * and methods on cfg->dont_inline.
 */
5277 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5279 MonoMethodHeaderSummary header;
5281 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5282 MonoMethodSignature *sig = mono_method_signature (method);
5286 if (cfg->disable_inline)
5291 if (cfg->inline_depth > 10)
5294 if (!mono_method_get_header_summary (method, &header))
5297 /*runtime, icall and pinvoke are checked by summary call*/
5298 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5299 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5300 (mono_class_is_marshalbyref (method->klass)) ||
5304 /* also consider num_locals? */
5305 /* Do the size check early to avoid creating vtables */
5306 if (!inline_limit_inited) {
5307 if (g_getenv ("MONO_INLINELIMIT"))
5308 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5310 inline_limit = INLINE_LENGTH_LIMIT;
5311 inline_limit_inited = TRUE;
5313 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5317 * if we can initialize the class of the method right away, we do,
5318 * otherwise we don't allow inlining if the class needs initialization,
5319 * since it would mean inserting a call to mono_runtime_class_init()
5320 * inside the inlined code
5322 if (!(cfg->opt & MONO_OPT_SHARED)) {
5323 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5324 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5325 vtable = mono_class_vtable (cfg->domain, method->klass);
5328 if (!cfg->compile_aot) {
5330 if (!mono_runtime_class_init_full (vtable, &error)) {
5331 mono_error_cleanup (&error);
5335 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5336 if (cfg->run_cctors && method->klass->has_cctor) {
5337 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5338 if (!method->klass->runtime_info)
5339 /* No vtable created yet */
5341 vtable = mono_class_vtable (cfg->domain, method->klass);
5344 /* This makes so that inline cannot trigger */
5345 /* .cctors: too many apps depend on them */
5346 /* running with a specific order... */
5347 if (! vtable->initialized)
5350 if (!mono_runtime_class_init_full (vtable, &error)) {
5351 mono_error_cleanup (&error);
5355 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5356 if (!method->klass->runtime_info)
5357 /* No vtable created yet */
5359 vtable = mono_class_vtable (cfg->domain, method->klass);
5362 if (!vtable->initialized)
5367 * If we're compiling for shared code
5368 * the cctor will need to be run at aot method load time, for example,
5369 * or at the end of the compilation of the inlining method.
5371 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods with R4 returns or parameters. */
5375 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5376 if (mono_arch_is_soft_float ()) {
5378 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5380 for (i = 0; i < sig->param_count; ++i)
5381 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compilation blacklist. */
5386 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting code
 * to run KLASS's class constructor first.
 */
5393 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5395 if (!cfg->compile_aot) {
5397 if (vtable->initialized)
5401 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5402 if (cfg->method == method)
5406 if (!mono_class_needs_cctor_run (klass, method))
5409 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5410 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX in the one dimensional array ARR of
 * KLASS elements, with an optional bounds check (BCHECK).  Uses an
 * x86/amd64 LEA fast path for power-of-two element sizes, and loads the
 * element size from the rgctx for gsharedvt variable-size classes.
 */
5417 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5421 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5424 if (mini_is_gsharedvt_variable_klass (klass)) {
5427 mono_class_init (klass);
5428 size = mono_class_array_element_size (klass);
5431 mult_reg = alloc_preg (cfg);
5432 array_reg = arr->dreg;
5433 index_reg = index->dreg;
5435 #if SIZEOF_REGISTER == 8
5436 /* The array reg is 64 bits but the index reg is only 32 */
5437 if (COMPILE_LLVM (cfg)) {
5439 index2_reg = index_reg;
5441 index2_reg = alloc_preg (cfg);
5442 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5445 if (index->type == STACK_I8) {
5446 index2_reg = alloc_preg (cfg);
5447 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5449 index2_reg = index_reg;
5454 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5456 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold index scaling and the vector offset into a single LEA. */
5457 if (size == 1 || size == 2 || size == 4 || size == 8) {
5458 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5460 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5461 ins->klass = mono_class_get_element_class (klass);
5462 ins->type = STACK_MP;
5468 add_reg = alloc_ireg_mp (cfg);
5471 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime, fetch it from
 * the rgctx and multiply dynamically. */
5474 g_assert (cfg->gshared);
5475 context_used = mini_class_check_context_used (cfg, klass);
5476 g_assert (context_used);
5477 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5478 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
5482 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5483 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5484 ins->klass = mono_class_get_element_class (klass);
5485 ins->type = STACK_MP;
5486 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [i1, i2] in the two dimensional array ARR
 * of KLASS elements.  Both dimensions are adjusted by their lower bound and
 * bounds-checked against their length, then the address is computed as
 * (realidx1 * len2 + realidx2) * size + offsetof (MonoArray, vector).
 */
5492 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5494 int bounds_reg = alloc_preg (cfg);
5495 int add_reg = alloc_ireg_mp (cfg);
5496 int mult_reg = alloc_preg (cfg);
5497 int mult2_reg = alloc_preg (cfg);
5498 int low1_reg = alloc_preg (cfg);
5499 int low2_reg = alloc_preg (cfg);
5500 int high1_reg = alloc_preg (cfg);
5501 int high2_reg = alloc_preg (cfg);
5502 int realidx1_reg = alloc_preg (cfg);
5503 int realidx2_reg = alloc_preg (cfg);
5504 int sum_reg = alloc_preg (cfg);
5505 int index1, index2, tmpreg;
5509 mono_class_init (klass);
5510 size = mono_class_array_element_size (klass);
5512 index1 = index_ins1->dreg;
5513 index2 = index_ins2->dreg;
5515 #if SIZEOF_REGISTER == 8
5516 /* The array reg is 64 bits but the index reg is only 32 */
5517 if (COMPILE_LLVM (cfg)) {
5520 tmpreg = alloc_preg (cfg);
5521 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5523 tmpreg = alloc_preg (cfg);
5524 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5528 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5532 /* range checking */
5533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5534 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound, checked against length. */
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5537 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5538 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5540 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5542 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5545 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5546 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5547 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5548 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5549 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5550 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = ((realidx1 * len2) + realidx2) * size + arr + vector offset */
5552 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5553 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5556 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5558 ins->type = STACK_MP;
5560 MONO_ADD_INS (cfg->cbb, ins);
5566 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5570 MonoMethod *addr_method;
5572 MonoClass *eclass = cmethod->klass->element_class;
5574 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5577 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5579 /* emit_ldelema_2 depends on OP_LMUL */
5580 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5581 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5584 if (mini_is_gsharedvt_variable_klass (eclass))
5587 element_size = mono_class_array_element_size (eclass);
5588 addr_method = mono_marshal_get_array_address (rank, element_size);
5589 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5594 static MonoBreakPolicy
5595 always_insert_breakpoint (MonoMethod *method)
5597 return MONO_BREAK_POLICY_ALWAYS;
5600 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
/**
 * mono_set_break_policy:
 * @policy_callback: the new callback function
 *
 * Allow embedders to decide whether to actually obey breakpoint instructions
 * (both break IL instructions and Debugger.Break () method calls), for example
 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * untrusted or semi-trusted code.
 *
 * @policy_callback will be called every time a break point instruction needs to
 * be inserted with the method argument being the method that calls Debugger.Break()
 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * if it wants the breakpoint to not be effective in the given method.
 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
5618 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5620 if (policy_callback)
5621 break_policy_func = policy_callback;
5623 break_policy_func = always_insert_breakpoint;
5627 should_insert_brekpoint (MonoMethod *method) {
5628 switch (break_policy_func (method)) {
5629 case MONO_BREAK_POLICY_ALWAYS:
5631 case MONO_BREAK_POLICY_NEVER:
5633 case MONO_BREAK_POLICY_ON_DBG:
5634 g_warning ("mdb no longer supported");
5637 g_warning ("Incorrect value returned from break policy callback");
5642 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5644 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5646 MonoInst *addr, *store, *load;
5647 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5649 /* the bounds check is already done by the callers */
5650 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5653 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5654 if (mini_type_is_reference (fsig->params [2]))
5655 emit_write_barrier (cfg, addr, load);
5657 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5665 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5667 return mini_type_is_reference (&klass->byval_arg);
5671 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5673 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5674 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5675 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5676 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5677 MonoInst *iargs [3];
5680 mono_class_setup_vtable (obj_array);
5681 g_assert (helper->slot);
5683 if (sp [0]->type != STACK_OBJ)
5685 if (sp [2]->type != STACK_OBJ)
5692 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5696 if (mini_is_gsharedvt_variable_klass (klass)) {
5699 // FIXME-VT: OP_ICONST optimization
5700 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5701 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5702 ins->opcode = OP_STOREV_MEMBASE;
5703 } else if (sp [1]->opcode == OP_ICONST) {
5704 int array_reg = sp [0]->dreg;
5705 int index_reg = sp [1]->dreg;
5706 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5708 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5709 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5712 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5715 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5717 if (generic_class_is_reference_type (cfg, klass))
5718 emit_write_barrier (cfg, addr, sp [2]);
5725 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5730 eklass = mono_class_from_mono_type (fsig->params [2]);
5732 eklass = mono_class_from_mono_type (fsig->ret);
5735 return emit_array_store (cfg, eklass, args, FALSE);
5737 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5744 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5747 int param_size, return_size;
5749 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5750 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5752 if (cfg->verbose_level > 3)
5753 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5755 //Don't allow mixing reference types with value types
5756 if (param_klass->valuetype != return_klass->valuetype) {
5757 if (cfg->verbose_level > 3)
5758 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5762 if (!param_klass->valuetype) {
5763 if (cfg->verbose_level > 3)
5764 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5769 if (param_klass->has_references || return_klass->has_references)
5772 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5773 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5774 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5775 if (cfg->verbose_level > 3)
5776 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5780 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5781 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5782 if (cfg->verbose_level > 3)
5783 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5787 param_size = mono_class_value_size (param_klass, &align);
5788 return_size = mono_class_value_size (return_klass, &align);
5790 //We can do it if sizes match
5791 if (param_size == return_size) {
5792 if (cfg->verbose_level > 3)
5793 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5797 //No simple way to handle struct if sizes don't match
5798 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5799 if (cfg->verbose_level > 3)
5800 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5805 * Same reg size category.
5806 * A quick note on why we don't require widening here.
5807 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5809 * Since the source value comes from a function argument, the JIT will already have
5810 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5812 if (param_size <= 4 && return_size <= 4) {
5813 if (cfg->verbose_level > 3)
5814 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5822 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5824 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5825 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5827 if (mini_is_gsharedvt_variable_type (fsig->ret))
5830 //Valuetypes that are semantically equivalent or numbers than can be widened to
5831 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5834 //Arrays of valuetypes that are semantically equivalent
5835 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5842 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5844 #ifdef MONO_ARCH_SIMD_INTRINSICS
5845 MonoInst *ins = NULL;
5847 if (cfg->opt & MONO_OPT_SIMD) {
5848 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5854 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5858 emit_memory_barrier (MonoCompile *cfg, int kind)
5860 MonoInst *ins = NULL;
5861 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5862 MONO_ADD_INS (cfg->cbb, ins);
5863 ins->backend.memory_barrier_kind = kind;
5869 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5871 MonoInst *ins = NULL;
5874 /* The LLVM backend supports these intrinsics */
5875 if (cmethod->klass == mono_defaults.math_class) {
5876 if (strcmp (cmethod->name, "Sin") == 0) {
5878 } else if (strcmp (cmethod->name, "Cos") == 0) {
5880 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5882 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5886 if (opcode && fsig->param_count == 1) {
5887 MONO_INST_NEW (cfg, ins, opcode);
5888 ins->type = STACK_R8;
5889 ins->dreg = mono_alloc_freg (cfg);
5890 ins->sreg1 = args [0]->dreg;
5891 MONO_ADD_INS (cfg->cbb, ins);
5895 if (cfg->opt & MONO_OPT_CMOV) {
5896 if (strcmp (cmethod->name, "Min") == 0) {
5897 if (fsig->params [0]->type == MONO_TYPE_I4)
5899 if (fsig->params [0]->type == MONO_TYPE_U4)
5900 opcode = OP_IMIN_UN;
5901 else if (fsig->params [0]->type == MONO_TYPE_I8)
5903 else if (fsig->params [0]->type == MONO_TYPE_U8)
5904 opcode = OP_LMIN_UN;
5905 } else if (strcmp (cmethod->name, "Max") == 0) {
5906 if (fsig->params [0]->type == MONO_TYPE_I4)
5908 if (fsig->params [0]->type == MONO_TYPE_U4)
5909 opcode = OP_IMAX_UN;
5910 else if (fsig->params [0]->type == MONO_TYPE_I8)
5912 else if (fsig->params [0]->type == MONO_TYPE_U8)
5913 opcode = OP_LMAX_UN;
5917 if (opcode && fsig->param_count == 2) {
5918 MONO_INST_NEW (cfg, ins, opcode);
5919 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5920 ins->dreg = mono_alloc_ireg (cfg);
5921 ins->sreg1 = args [0]->dreg;
5922 ins->sreg2 = args [1]->dreg;
5923 MONO_ADD_INS (cfg->cbb, ins);
5931 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5933 if (cmethod->klass == mono_defaults.array_class) {
5934 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5935 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5936 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5937 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5938 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5939 return emit_array_unsafe_mov (cfg, fsig, args);
5946 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5948 MonoInst *ins = NULL;
5950 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5952 if (cmethod->klass == mono_defaults.string_class) {
5953 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5954 int dreg = alloc_ireg (cfg);
5955 int index_reg = alloc_preg (cfg);
5956 int add_reg = alloc_preg (cfg);
5958 #if SIZEOF_REGISTER == 8
5959 if (COMPILE_LLVM (cfg)) {
5960 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5962 /* The array reg is 64 bits but the index reg is only 32 */
5963 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5966 index_reg = args [1]->dreg;
5968 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5970 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5971 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5972 add_reg = ins->dreg;
5973 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5976 int mult_reg = alloc_preg (cfg);
5977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5978 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5979 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5980 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5982 type_from_op (cfg, ins, NULL, NULL);
5984 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5985 int dreg = alloc_ireg (cfg);
5986 /* Decompose later to allow more optimizations */
5987 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5988 ins->type = STACK_I4;
5989 ins->flags |= MONO_INST_FAULT;
5990 cfg->cbb->has_array_access = TRUE;
5991 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5996 } else if (cmethod->klass == mono_defaults.object_class) {
5997 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5998 int dreg = alloc_ireg_ref (cfg);
5999 int vt_reg = alloc_preg (cfg);
6000 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6001 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6002 type_from_op (cfg, ins, NULL, NULL);
6005 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6006 int dreg = alloc_ireg (cfg);
6007 int t1 = alloc_ireg (cfg);
6009 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6010 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6011 ins->type = STACK_I4;
6014 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6015 MONO_INST_NEW (cfg, ins, OP_NOP);
6016 MONO_ADD_INS (cfg->cbb, ins);
6020 } else if (cmethod->klass == mono_defaults.array_class) {
6021 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6022 return emit_array_generic_access (cfg, fsig, args, FALSE);
6023 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6024 return emit_array_generic_access (cfg, fsig, args, TRUE);
6026 #ifndef MONO_BIG_ARRAYS
6028 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6031 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6032 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6033 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6034 int dreg = alloc_ireg (cfg);
6035 int bounds_reg = alloc_ireg_mp (cfg);
6036 MonoBasicBlock *end_bb, *szarray_bb;
6037 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6039 NEW_BBLOCK (cfg, end_bb);
6040 NEW_BBLOCK (cfg, szarray_bb);
6042 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6043 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6046 /* Non-szarray case */
6048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6049 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6052 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6053 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6054 MONO_START_BB (cfg, szarray_bb);
6057 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6058 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6060 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6061 MONO_START_BB (cfg, end_bb);
6063 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6064 ins->type = STACK_I4;
6070 if (cmethod->name [0] != 'g')
6073 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6074 int dreg = alloc_ireg (cfg);
6075 int vtable_reg = alloc_preg (cfg);
6076 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6077 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6079 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6080 type_from_op (cfg, ins, NULL, NULL);
6083 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6084 int dreg = alloc_ireg (cfg);
6086 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6087 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6088 type_from_op (cfg, ins, NULL, NULL);
6093 } else if (cmethod->klass == runtime_helpers_class) {
6094 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6095 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6099 } else if (cmethod->klass == mono_defaults.monitor_class) {
6100 gboolean is_enter = FALSE;
6102 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6107 * To make async stack traces work, icalls which can block should have a wrapper.
6108 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6110 MonoBasicBlock *end_bb;
6112 NEW_BBLOCK (cfg, end_bb);
6114 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter_fast, args);
6115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6117 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter, args);
6118 MONO_START_BB (cfg, end_bb);
6121 } else if (cmethod->klass == mono_defaults.thread_class) {
6122 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6123 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6124 MONO_ADD_INS (cfg->cbb, ins);
6126 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6127 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6128 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6130 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6132 if (fsig->params [0]->type == MONO_TYPE_I1)
6133 opcode = OP_LOADI1_MEMBASE;
6134 else if (fsig->params [0]->type == MONO_TYPE_U1)
6135 opcode = OP_LOADU1_MEMBASE;
6136 else if (fsig->params [0]->type == MONO_TYPE_I2)
6137 opcode = OP_LOADI2_MEMBASE;
6138 else if (fsig->params [0]->type == MONO_TYPE_U2)
6139 opcode = OP_LOADU2_MEMBASE;
6140 else if (fsig->params [0]->type == MONO_TYPE_I4)
6141 opcode = OP_LOADI4_MEMBASE;
6142 else if (fsig->params [0]->type == MONO_TYPE_U4)
6143 opcode = OP_LOADU4_MEMBASE;
6144 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6145 opcode = OP_LOADI8_MEMBASE;
6146 else if (fsig->params [0]->type == MONO_TYPE_R4)
6147 opcode = OP_LOADR4_MEMBASE;
6148 else if (fsig->params [0]->type == MONO_TYPE_R8)
6149 opcode = OP_LOADR8_MEMBASE;
6150 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6151 opcode = OP_LOAD_MEMBASE;
6154 MONO_INST_NEW (cfg, ins, opcode);
6155 ins->inst_basereg = args [0]->dreg;
6156 ins->inst_offset = 0;
6157 MONO_ADD_INS (cfg->cbb, ins);
6159 switch (fsig->params [0]->type) {
6166 ins->dreg = mono_alloc_ireg (cfg);
6167 ins->type = STACK_I4;
6171 ins->dreg = mono_alloc_lreg (cfg);
6172 ins->type = STACK_I8;
6176 ins->dreg = mono_alloc_ireg (cfg);
6177 #if SIZEOF_REGISTER == 8
6178 ins->type = STACK_I8;
6180 ins->type = STACK_I4;
6185 ins->dreg = mono_alloc_freg (cfg);
6186 ins->type = STACK_R8;
6189 g_assert (mini_type_is_reference (fsig->params [0]));
6190 ins->dreg = mono_alloc_ireg_ref (cfg);
6191 ins->type = STACK_OBJ;
6195 if (opcode == OP_LOADI8_MEMBASE)
6196 ins = mono_decompose_opcode (cfg, ins);
6198 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6202 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6204 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6206 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6207 opcode = OP_STOREI1_MEMBASE_REG;
6208 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6209 opcode = OP_STOREI2_MEMBASE_REG;
6210 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6211 opcode = OP_STOREI4_MEMBASE_REG;
6212 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6213 opcode = OP_STOREI8_MEMBASE_REG;
6214 else if (fsig->params [0]->type == MONO_TYPE_R4)
6215 opcode = OP_STORER4_MEMBASE_REG;
6216 else if (fsig->params [0]->type == MONO_TYPE_R8)
6217 opcode = OP_STORER8_MEMBASE_REG;
6218 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6219 opcode = OP_STORE_MEMBASE_REG;
6222 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6224 MONO_INST_NEW (cfg, ins, opcode);
6225 ins->sreg1 = args [1]->dreg;
6226 ins->inst_destbasereg = args [0]->dreg;
6227 ins->inst_offset = 0;
6228 MONO_ADD_INS (cfg->cbb, ins);
6230 if (opcode == OP_STOREI8_MEMBASE_REG)
6231 ins = mono_decompose_opcode (cfg, ins);
6236 } else if (cmethod->klass->image == mono_defaults.corlib &&
6237 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6238 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6241 #if SIZEOF_REGISTER == 8
6242 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6243 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6244 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6245 ins->dreg = mono_alloc_preg (cfg);
6246 ins->sreg1 = args [0]->dreg;
6247 ins->type = STACK_I8;
6248 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6249 MONO_ADD_INS (cfg->cbb, ins);
6253 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6255 /* 64 bit reads are already atomic */
6256 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6257 load_ins->dreg = mono_alloc_preg (cfg);
6258 load_ins->inst_basereg = args [0]->dreg;
6259 load_ins->inst_offset = 0;
6260 load_ins->type = STACK_I8;
6261 MONO_ADD_INS (cfg->cbb, load_ins);
6263 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6270 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6271 MonoInst *ins_iconst;
6274 if (fsig->params [0]->type == MONO_TYPE_I4) {
6275 opcode = OP_ATOMIC_ADD_I4;
6276 cfg->has_atomic_add_i4 = TRUE;
6278 #if SIZEOF_REGISTER == 8
6279 else if (fsig->params [0]->type == MONO_TYPE_I8)
6280 opcode = OP_ATOMIC_ADD_I8;
6283 if (!mono_arch_opcode_supported (opcode))
6285 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6286 ins_iconst->inst_c0 = 1;
6287 ins_iconst->dreg = mono_alloc_ireg (cfg);
6288 MONO_ADD_INS (cfg->cbb, ins_iconst);
6290 MONO_INST_NEW (cfg, ins, opcode);
6291 ins->dreg = mono_alloc_ireg (cfg);
6292 ins->inst_basereg = args [0]->dreg;
6293 ins->inst_offset = 0;
6294 ins->sreg2 = ins_iconst->dreg;
6295 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6296 MONO_ADD_INS (cfg->cbb, ins);
6298 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6299 MonoInst *ins_iconst;
6302 if (fsig->params [0]->type == MONO_TYPE_I4) {
6303 opcode = OP_ATOMIC_ADD_I4;
6304 cfg->has_atomic_add_i4 = TRUE;
6306 #if SIZEOF_REGISTER == 8
6307 else if (fsig->params [0]->type == MONO_TYPE_I8)
6308 opcode = OP_ATOMIC_ADD_I8;
6311 if (!mono_arch_opcode_supported (opcode))
6313 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6314 ins_iconst->inst_c0 = -1;
6315 ins_iconst->dreg = mono_alloc_ireg (cfg);
6316 MONO_ADD_INS (cfg->cbb, ins_iconst);
6318 MONO_INST_NEW (cfg, ins, opcode);
6319 ins->dreg = mono_alloc_ireg (cfg);
6320 ins->inst_basereg = args [0]->dreg;
6321 ins->inst_offset = 0;
6322 ins->sreg2 = ins_iconst->dreg;
6323 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6324 MONO_ADD_INS (cfg->cbb, ins);
6326 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6329 if (fsig->params [0]->type == MONO_TYPE_I4) {
6330 opcode = OP_ATOMIC_ADD_I4;
6331 cfg->has_atomic_add_i4 = TRUE;
6333 #if SIZEOF_REGISTER == 8
6334 else if (fsig->params [0]->type == MONO_TYPE_I8)
6335 opcode = OP_ATOMIC_ADD_I8;
6338 if (!mono_arch_opcode_supported (opcode))
6340 MONO_INST_NEW (cfg, ins, opcode);
6341 ins->dreg = mono_alloc_ireg (cfg);
6342 ins->inst_basereg = args [0]->dreg;
6343 ins->inst_offset = 0;
6344 ins->sreg2 = args [1]->dreg;
6345 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6346 MONO_ADD_INS (cfg->cbb, ins);
6349 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6350 MonoInst *f2i = NULL, *i2f;
6351 guint32 opcode, f2i_opcode, i2f_opcode;
6352 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6353 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6355 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6356 fsig->params [0]->type == MONO_TYPE_R4) {
6357 opcode = OP_ATOMIC_EXCHANGE_I4;
6358 f2i_opcode = OP_MOVE_F_TO_I4;
6359 i2f_opcode = OP_MOVE_I4_TO_F;
6360 cfg->has_atomic_exchange_i4 = TRUE;
6362 #if SIZEOF_REGISTER == 8
6364 fsig->params [0]->type == MONO_TYPE_I8 ||
6365 fsig->params [0]->type == MONO_TYPE_R8 ||
6366 fsig->params [0]->type == MONO_TYPE_I) {
6367 opcode = OP_ATOMIC_EXCHANGE_I8;
6368 f2i_opcode = OP_MOVE_F_TO_I8;
6369 i2f_opcode = OP_MOVE_I8_TO_F;
6372 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6373 opcode = OP_ATOMIC_EXCHANGE_I4;
6374 cfg->has_atomic_exchange_i4 = TRUE;
6380 if (!mono_arch_opcode_supported (opcode))
6384 /* TODO: Decompose these opcodes instead of bailing here. */
6385 if (COMPILE_SOFT_FLOAT (cfg))
6388 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6389 f2i->dreg = mono_alloc_ireg (cfg);
6390 f2i->sreg1 = args [1]->dreg;
6391 if (f2i_opcode == OP_MOVE_F_TO_I4)
6392 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6393 MONO_ADD_INS (cfg->cbb, f2i);
6396 MONO_INST_NEW (cfg, ins, opcode);
6397 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6398 ins->inst_basereg = args [0]->dreg;
6399 ins->inst_offset = 0;
6400 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6401 MONO_ADD_INS (cfg->cbb, ins);
6403 switch (fsig->params [0]->type) {
6405 ins->type = STACK_I4;
6408 ins->type = STACK_I8;
6411 #if SIZEOF_REGISTER == 8
6412 ins->type = STACK_I8;
6414 ins->type = STACK_I4;
6419 ins->type = STACK_R8;
6422 g_assert (mini_type_is_reference (fsig->params [0]));
6423 ins->type = STACK_OBJ;
6428 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6429 i2f->dreg = mono_alloc_freg (cfg);
6430 i2f->sreg1 = ins->dreg;
6431 i2f->type = STACK_R8;
6432 if (i2f_opcode == OP_MOVE_I4_TO_F)
6433 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6434 MONO_ADD_INS (cfg->cbb, i2f);
6439 if (cfg->gen_write_barriers && is_ref)
6440 emit_write_barrier (cfg, args [0], args [1]);
6442 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6443 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6444 guint32 opcode, f2i_opcode, i2f_opcode;
6445 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6446 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6448 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6449 fsig->params [1]->type == MONO_TYPE_R4) {
6450 opcode = OP_ATOMIC_CAS_I4;
6451 f2i_opcode = OP_MOVE_F_TO_I4;
6452 i2f_opcode = OP_MOVE_I4_TO_F;
6453 cfg->has_atomic_cas_i4 = TRUE;
6455 #if SIZEOF_REGISTER == 8
6457 fsig->params [1]->type == MONO_TYPE_I8 ||
6458 fsig->params [1]->type == MONO_TYPE_R8 ||
6459 fsig->params [1]->type == MONO_TYPE_I) {
6460 opcode = OP_ATOMIC_CAS_I8;
6461 f2i_opcode = OP_MOVE_F_TO_I8;
6462 i2f_opcode = OP_MOVE_I8_TO_F;
6465 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6466 opcode = OP_ATOMIC_CAS_I4;
6467 cfg->has_atomic_cas_i4 = TRUE;
6473 if (!mono_arch_opcode_supported (opcode))
6477 /* TODO: Decompose these opcodes instead of bailing here. */
6478 if (COMPILE_SOFT_FLOAT (cfg))
6481 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6482 f2i_new->dreg = mono_alloc_ireg (cfg);
6483 f2i_new->sreg1 = args [1]->dreg;
6484 if (f2i_opcode == OP_MOVE_F_TO_I4)
6485 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6486 MONO_ADD_INS (cfg->cbb, f2i_new);
6488 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6489 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6490 f2i_cmp->sreg1 = args [2]->dreg;
6491 if (f2i_opcode == OP_MOVE_F_TO_I4)
6492 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6493 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6496 MONO_INST_NEW (cfg, ins, opcode);
6497 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6498 ins->sreg1 = args [0]->dreg;
6499 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6500 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6501 MONO_ADD_INS (cfg->cbb, ins);
6503 switch (fsig->params [1]->type) {
6505 ins->type = STACK_I4;
6508 ins->type = STACK_I8;
6511 #if SIZEOF_REGISTER == 8
6512 ins->type = STACK_I8;
6514 ins->type = STACK_I4;
6518 ins->type = cfg->r4_stack_type;
6521 ins->type = STACK_R8;
6524 g_assert (mini_type_is_reference (fsig->params [1]));
6525 ins->type = STACK_OBJ;
6530 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6531 i2f->dreg = mono_alloc_freg (cfg);
6532 i2f->sreg1 = ins->dreg;
6533 i2f->type = STACK_R8;
6534 if (i2f_opcode == OP_MOVE_I4_TO_F)
6535 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6536 MONO_ADD_INS (cfg->cbb, i2f);
6541 if (cfg->gen_write_barriers && is_ref)
6542 emit_write_barrier (cfg, args [0], args [1]);
6544 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6545 fsig->params [1]->type == MONO_TYPE_I4) {
6546 MonoInst *cmp, *ceq;
6548 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6551 /* int32 r = CAS (location, value, comparand); */
6552 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6553 ins->dreg = alloc_ireg (cfg);
6554 ins->sreg1 = args [0]->dreg;
6555 ins->sreg2 = args [1]->dreg;
6556 ins->sreg3 = args [2]->dreg;
6557 ins->type = STACK_I4;
6558 MONO_ADD_INS (cfg->cbb, ins);
6560 /* bool result = r == comparand; */
6561 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6562 cmp->sreg1 = ins->dreg;
6563 cmp->sreg2 = args [2]->dreg;
6564 cmp->type = STACK_I4;
6565 MONO_ADD_INS (cfg->cbb, cmp);
6567 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6568 ceq->dreg = alloc_ireg (cfg);
6569 ceq->type = STACK_I4;
6570 MONO_ADD_INS (cfg->cbb, ceq);
6572 /* *success = result; */
6573 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6575 cfg->has_atomic_cas_i4 = TRUE;
6577 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6578 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6582 } else if (cmethod->klass->image == mono_defaults.corlib &&
6583 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6584 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6587 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6589 MonoType *t = fsig->params [0];
6591 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6593 g_assert (t->byref);
6594 /* t is a byref type, so the reference check is more complicated */
6595 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6596 if (t->type == MONO_TYPE_I1)
6597 opcode = OP_ATOMIC_LOAD_I1;
6598 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6599 opcode = OP_ATOMIC_LOAD_U1;
6600 else if (t->type == MONO_TYPE_I2)
6601 opcode = OP_ATOMIC_LOAD_I2;
6602 else if (t->type == MONO_TYPE_U2)
6603 opcode = OP_ATOMIC_LOAD_U2;
6604 else if (t->type == MONO_TYPE_I4)
6605 opcode = OP_ATOMIC_LOAD_I4;
6606 else if (t->type == MONO_TYPE_U4)
6607 opcode = OP_ATOMIC_LOAD_U4;
6608 else if (t->type == MONO_TYPE_R4)
6609 opcode = OP_ATOMIC_LOAD_R4;
6610 else if (t->type == MONO_TYPE_R8)
6611 opcode = OP_ATOMIC_LOAD_R8;
6612 #if SIZEOF_REGISTER == 8
6613 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6614 opcode = OP_ATOMIC_LOAD_I8;
6615 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6616 opcode = OP_ATOMIC_LOAD_U8;
6618 else if (t->type == MONO_TYPE_I)
6619 opcode = OP_ATOMIC_LOAD_I4;
6620 else if (is_ref || t->type == MONO_TYPE_U)
6621 opcode = OP_ATOMIC_LOAD_U4;
6625 if (!mono_arch_opcode_supported (opcode))
6628 MONO_INST_NEW (cfg, ins, opcode);
6629 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6630 ins->sreg1 = args [0]->dreg;
6631 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6632 MONO_ADD_INS (cfg->cbb, ins);
6635 case MONO_TYPE_BOOLEAN:
6642 ins->type = STACK_I4;
6646 ins->type = STACK_I8;
6650 #if SIZEOF_REGISTER == 8
6651 ins->type = STACK_I8;
6653 ins->type = STACK_I4;
6657 ins->type = cfg->r4_stack_type;
6660 ins->type = STACK_R8;
6664 ins->type = STACK_OBJ;
6670 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6672 MonoType *t = fsig->params [0];
6675 g_assert (t->byref);
6676 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6677 if (t->type == MONO_TYPE_I1)
6678 opcode = OP_ATOMIC_STORE_I1;
6679 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6680 opcode = OP_ATOMIC_STORE_U1;
6681 else if (t->type == MONO_TYPE_I2)
6682 opcode = OP_ATOMIC_STORE_I2;
6683 else if (t->type == MONO_TYPE_U2)
6684 opcode = OP_ATOMIC_STORE_U2;
6685 else if (t->type == MONO_TYPE_I4)
6686 opcode = OP_ATOMIC_STORE_I4;
6687 else if (t->type == MONO_TYPE_U4)
6688 opcode = OP_ATOMIC_STORE_U4;
6689 else if (t->type == MONO_TYPE_R4)
6690 opcode = OP_ATOMIC_STORE_R4;
6691 else if (t->type == MONO_TYPE_R8)
6692 opcode = OP_ATOMIC_STORE_R8;
6693 #if SIZEOF_REGISTER == 8
6694 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6695 opcode = OP_ATOMIC_STORE_I8;
6696 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6697 opcode = OP_ATOMIC_STORE_U8;
6699 else if (t->type == MONO_TYPE_I)
6700 opcode = OP_ATOMIC_STORE_I4;
6701 else if (is_ref || t->type == MONO_TYPE_U)
6702 opcode = OP_ATOMIC_STORE_U4;
6706 if (!mono_arch_opcode_supported (opcode))
6709 MONO_INST_NEW (cfg, ins, opcode);
6710 ins->dreg = args [0]->dreg;
6711 ins->sreg1 = args [1]->dreg;
6712 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6713 MONO_ADD_INS (cfg->cbb, ins);
6715 if (cfg->gen_write_barriers && is_ref)
6716 emit_write_barrier (cfg, args [0], args [1]);
6722 } else if (cmethod->klass->image == mono_defaults.corlib &&
6723 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6724 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6725 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6726 if (should_insert_brekpoint (cfg->method)) {
6727 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6729 MONO_INST_NEW (cfg, ins, OP_NOP);
6730 MONO_ADD_INS (cfg->cbb, ins);
6734 } else if (cmethod->klass->image == mono_defaults.corlib &&
6735 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6736 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6737 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6739 EMIT_NEW_ICONST (cfg, ins, 1);
6741 EMIT_NEW_ICONST (cfg, ins, 0);
6744 } else if (cmethod->klass->image == mono_defaults.corlib &&
6745 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6746 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6747 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6748 /* No stack walks are currently available, so implement this as an intrinsic */
6749 MonoInst *assembly_ins;
6751 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6752 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6755 } else if (cmethod->klass->image == mono_defaults.corlib &&
6756 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6757 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6758 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6759 /* No stack walks are currently available, so implement this as an intrinsic */
6760 MonoInst *method_ins;
6761 MonoMethod *declaring = cfg->method;
6763 /* This returns the declaring generic method */
6764 if (declaring->is_inflated)
6765 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6766 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6767 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6768 cfg->no_inline = TRUE;
6769 if (cfg->method != cfg->current_method)
6770 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6773 } else if (cmethod->klass == mono_defaults.math_class) {
6775 * There is general branchless code for Min/Max, but it does not work for
6777 * http://everything2.com/?node_id=1051618
6779 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6780 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6781 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6782 !strcmp (cmethod->klass->name, "Selector")) ||
6783 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6784 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6785 !strcmp (cmethod->klass->name, "Selector"))
6787 if (cfg->backend->have_objc_get_selector &&
6788 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6789 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6790 cfg->compile_aot && !cfg->llvm_only) {
6792 MonoJumpInfoToken *ji;
6797 cfg->exception_message = g_strdup ("GetHandle");
6798 cfg->disable_llvm = TRUE;
6800 if (args [0]->opcode == OP_GOT_ENTRY) {
6801 pi = (MonoInst *)args [0]->inst_p1;
6802 g_assert (pi->opcode == OP_PATCH_INFO);
6803 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6804 ji = (MonoJumpInfoToken *)pi->inst_p0;
6806 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6807 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6810 NULLIFY_INS (args [0]);
6812 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6813 return_val_if_nok (&cfg->error, NULL);
6815 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6816 ins->dreg = mono_alloc_ireg (cfg);
6819 MONO_ADD_INS (cfg->cbb, ins);
6824 #ifdef MONO_ARCH_SIMD_INTRINSICS
6825 if (cfg->opt & MONO_OPT_SIMD) {
6826 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6832 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6836 if (COMPILE_LLVM (cfg)) {
6837 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6842 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6846 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Intercept calls to specific runtime-internal methods and redirect them
 *   to JIT-provided implementations.  The only case visible here rewrites
 *   String.InternalAllocateStr into a call to the GC's managed allocator,
 *   skipped when allocation profiling or shared (generic) code is active.
 *   NOTE(review): several lines (the MONO_CROSS_COMPILE #else arm, the
 *   managed_alloc NULL check, closing braces and the fallback return) are
 *   not visible in this excerpt -- confirm against the full file.
 */
6849 inline static MonoInst*
6850 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6851 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6853 if (method->klass == mono_defaults.string_class) {
6854 /* managed string allocation support */
6855 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6856 MonoInst *iargs [2];
6857 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6858 MonoMethod *managed_alloc = NULL;
6860 g_assert (vtable); /* Should not fail since it is System.String */
6861 #ifndef MONO_CROSS_COMPILE
6862 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* arg 0: the vtable of System.String, arg 1: the requested length */
6866 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6867 iargs [1] = args [0];
6868 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   For each argument of an inlined callee (including the implicit 'this'
 *   when sig->hasthis), create an OP_LOCAL variable, record it in
 *   cfg->args [i], and emit a store of the corresponding stack slot from SP
 *   into it, so the inlined body can reference the arguments as variables.
 *   NOTE(review): the return-type line, loop-index declaration and closing
 *   brace are not visible in this excerpt.
 */
6875 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6877 MonoInst *store, *temp;
6880 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6881 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6884 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6885 * would be different than the MonoInst's used to represent arguments, and
6886 * the ldelema implementation can't deal with that.
6887 * Solution: When ldelema is used on an inline argument, create a var for
6888 * it, emit ldelema on that var, and emit the saving code below in
6889 * inline_method () if needed.
6891 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6892 cfg->args [i] = temp;
6893 /* This uses cfg->args [i] which is set by the preceding line */
6894 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6895 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining by callee/caller name via environment variables. */
6900 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6901 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6903 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug helper: permit inlining only when the callee's full name starts
 *   with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The
 *   prefix is read once and cached in a function-local static.
 *   NOTE(review): the return-type line, the branch taken when the env var
 *   is unset, and the closing brace are not visible in this excerpt.
 */
6905 check_inline_called_method_name_limit (MonoMethod *called_method)
6908 static const char *limit = NULL;
6910 if (limit == NULL) {
6911 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6913 if (limit_string != NULL)
6914 limit = limit_string;
6919 if (limit [0] != '\0') {
6920 char *called_method_name = mono_method_full_name (called_method, TRUE);
6922 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6923 g_free (called_method_name);
/* Match only an exact prefix; the <= variant below was a broader experiment. */
6925 //return (strncmp_result <= 0);
6926 return (strncmp_result == 0);
6933 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit, but filters on the
 *   CALLER's full name using $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 *   NOTE(review): the return-type line, the env-var-unset branch and the
 *   closing brace are not visible in this excerpt.
 */
6935 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6938 static const char *limit = NULL;
6940 if (limit == NULL) {
6941 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6942 if (limit_string != NULL) {
6943 limit = limit_string;
6949 if (limit [0] != '\0') {
6950 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6952 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6953 g_free (caller_method_name);
/* Match only an exact prefix; the <= variant below was a broader experiment. */
6955 //return (strncmp_result <= 0);
6956 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that zero-initializes register DREG according to RTYPE: NULL
 *   for pointers/references, 0 for integer types, 0.0 for R4/R8 (loaded
 *   from the function-local statics below, whose addresses back the
 *   R4CONST/R8CONST instructions), and VZERO for value types.
 *   NOTE(review): the 'static void' line, local declarations (ins, t),
 *   the leading if-condition and the closing braces are not visible in
 *   this excerpt; the dispatch variable 't' is presumably rtype->type.
 */
6964 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6966 static double r8_0 = 0.0;
6967 static float r4_0 = 0.0;
6971 rtype = mini_get_underlying_type (rtype);
6975 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6976 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6977 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6978 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6979 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6980 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6981 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6982 ins->type = STACK_R4;
6983 ins->inst_p0 = (void*)&r4_0;
6985 MONO_ADD_INS (cfg->cbb, ins);
6986 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6987 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6988 ins->type = STACK_R8;
6989 ins->inst_p0 = (void*)&r8_0;
6991 MONO_ADD_INS (cfg->cbb, ins);
6992 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6993 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6994 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6995 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6996 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized reference. */
6998 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* placeholder initializations,
 *   which keep the IR well-formed without generating real code.  Falls
 *   back to a real emit_init_rvar for types with no dummy opcode.
 *   NOTE(review): the 'static void' line, local declarations, the leading
 *   if-condition and closing braces are not visible in this excerpt.
 */
7003 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7007 rtype = mini_get_underlying_type (rtype);
7011 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7012 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7014 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7016 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7017 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7018 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7019 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7020 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7021 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7022 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7023 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7024 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy form for this type: emit a real zero initialization. */
7026 emit_init_rvar (cfg, dreg, rtype);
7030 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float the
 *   value is materialized into a fresh register and then stored with
 *   LOCSTORE; otherwise the local's own dreg is initialized directly.
 *   NOTE(review): the return-type line, braces and the if/else structure
 *   around the init/dummy choice are partially elided in this excerpt.
 */
7032 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7034 MonoInst *var = cfg->locals [local];
7035 if (COMPILE_SOFT_FLOAT (cfg)) {
7037 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7038 emit_init_rvar (cfg, reg, type);
7039 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7042 emit_init_rvar (cfg, var->dreg, type);
7044 emit_dummy_init_rvar (cfg, var->dreg, type);
7051 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD at the current point of cfg->cbb.  Saves the
 *   parts of CFG state that mono_method_to_ir clobbers, recursively
 *   converts the callee's IL between fresh start/end bblocks, then either
 *   commits the inline (linking/merging the bblocks and loading the
 *   return temp) or aborts and restores cfg->cbb.  Returns the inline
 *   cost reported by mono_method_to_ir; non-positive on failure.
 *   NOTE(review): this excerpt is heavily elided (missing declarations,
 *   braces, early-return paths and several statements) -- do not modify
 *   the control flow without consulting the full file.
 */
7054 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7055 guchar *ip, guint real_offset, gboolean inline_always)
7058 MonoInst *ins, *rvar = NULL;
7059 MonoMethodHeader *cheader;
7060 MonoBasicBlock *ebblock, *sbblock;
7062 MonoMethod *prev_inlined_method;
7063 MonoInst **prev_locals, **prev_args;
7064 MonoType **prev_arg_types;
7065 guint prev_real_offset;
7066 GHashTable *prev_cbb_hash;
7067 MonoBasicBlock **prev_cil_offset_to_bb;
7068 MonoBasicBlock *prev_cbb;
7069 unsigned char* prev_cil_start;
7070 guint32 prev_cil_offset_to_bb_len;
7071 MonoMethod *prev_current_method;
7072 MonoGenericContext *prev_generic_context;
7073 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7075 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see helpers above). */
7077 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7078 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7081 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7082 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7087 fsig = mono_method_signature (cmethod);
7089 if (cfg->verbose_level > 2)
7090 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7092 if (!cmethod->inline_info) {
7093 cfg->stat_inlineable_methods++;
7094 cmethod->inline_info = 1;
7097 /* allocate local variables */
7098 cheader = mono_method_get_header_checked (cmethod, &error);
7100 if (inline_always) {
7101 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7102 mono_error_move (&cfg->error, &error);
7104 mono_error_cleanup (&error);
7109 /*Must verify before creating locals as it can cause the JIT to assert.*/
7110 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7111 mono_metadata_free_mh (cheader);
7115 /* allocate space to store the return value */
7116 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7117 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7120 prev_locals = cfg->locals;
7121 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7122 for (i = 0; i < cheader->num_locals; ++i)
7123 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7125 /* allocate start and end blocks */
7126 /* This is needed so if the inline is aborted, we can clean up */
7127 NEW_BBLOCK (cfg, sbblock);
7128 sbblock->real_offset = real_offset;
7130 NEW_BBLOCK (cfg, ebblock);
7131 ebblock->block_num = cfg->num_bblocks++;
7132 ebblock->real_offset = real_offset;
/* Save every piece of CFG state the recursive IL conversion overwrites. */
7134 prev_args = cfg->args;
7135 prev_arg_types = cfg->arg_types;
7136 prev_inlined_method = cfg->inlined_method;
7137 cfg->inlined_method = cmethod;
7138 cfg->ret_var_set = FALSE;
7139 cfg->inline_depth ++;
7140 prev_real_offset = cfg->real_offset;
7141 prev_cbb_hash = cfg->cbb_hash;
7142 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7143 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7144 prev_cil_start = cfg->cil_start;
7145 prev_cbb = cfg->cbb;
7146 prev_current_method = cfg->current_method;
7147 prev_generic_context = cfg->generic_context;
7148 prev_ret_var_set = cfg->ret_var_set;
7149 prev_disable_inline = cfg->disable_inline;
7151 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7154 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7156 ret_var_set = cfg->ret_var_set;
/* Restore the saved CFG state regardless of the inline outcome. */
7158 cfg->inlined_method = prev_inlined_method;
7159 cfg->real_offset = prev_real_offset;
7160 cfg->cbb_hash = prev_cbb_hash;
7161 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7162 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7163 cfg->cil_start = prev_cil_start;
7164 cfg->locals = prev_locals;
7165 cfg->args = prev_args;
7166 cfg->arg_types = prev_arg_types;
7167 cfg->current_method = prev_current_method;
7168 cfg->generic_context = prev_generic_context;
7169 cfg->ret_var_set = prev_ret_var_set;
7170 cfg->disable_inline = prev_disable_inline;
7171 cfg->inline_depth --;
/* Commit when the cost is acceptable (60 is the local cost limit here,
 * distinct from INLINE_LENGTH_LIMIT) or inlining is forced. */
7173 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7174 if (cfg->verbose_level > 2)
7175 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7177 cfg->stat_inlined_methods++;
7179 /* always add some code to avoid block split failures */
7180 MONO_INST_NEW (cfg, ins, OP_NOP);
7181 MONO_ADD_INS (prev_cbb, ins);
7183 prev_cbb->next_bb = sbblock;
7184 link_bblock (cfg, prev_cbb, sbblock);
7187 * Get rid of the begin and end bblocks if possible to aid local
7190 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7192 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7193 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7195 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7196 MonoBasicBlock *prev = ebblock->in_bb [0];
7198 if (prev->next_bb == ebblock) {
7199 mono_merge_basic_blocks (cfg, prev, ebblock);
7201 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7202 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7203 cfg->cbb = prev_cbb;
7206 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7211 * It's possible that the rvar is set in some prev bblock, but not in others.
7217 for (i = 0; i < ebblock->in_count; ++i) {
7218 bb = ebblock->in_bb [i];
7220 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7223 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7233 * If the inlined method contains only a throw, then the ret var is not
7234 * set, so set it to a dummy value.
7237 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7239 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7242 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7245 if (cfg->verbose_level > 2)
7246 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7247 cfg->exception_type = MONO_EXCEPTION_NONE;
7249 /* This gets rid of the newly added bblocks */
7250 cfg->cbb = prev_cbb;
7252 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7257 * Some of these comments may well be out-of-date.
7258 * Design decisions: we do a single pass over the IL code (and we do bblock
7259 * splitting/merging in the few cases when it's required: a back jump to an IL
7260 * address that was not already seen as bblock starting point).
7261 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7262 * Complex operations are decomposed in simpler ones right away. We need to let the
7263 * arch-specific code peek and poke inside this process somehow (except when the
7264 * optimizations can take advantage of the full semantic info of coarse opcodes).
7265 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7266 * MonoInst->opcode initially is the IL opcode or some simplification of that
7267 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7268 * opcode with value bigger than OP_LAST.
7269 * At this point the IR can be handed over to an interpreter, a dumb code generator
7270 * or to the optimizing code generator that will translate it to SSA form.
7272 * Profiling directed optimizations.
7273 * We may compile by default with few or no optimizations and instrument the code
7274 * or the user may indicate what methods to optimize the most either in a config file
7275 * or through repeated runs where the compiler applies offline the optimizations to
7276 * each method and then decides if it was worth it.
/* Lightweight IL verification helpers used throughout mono_method_to_ir;
 * each bails out through UNVERIFIED / TYPE_LOAD_ERROR on failure. */
7279 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7280 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7281 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7282 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7283 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7284 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7285 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7286 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7288 /* offset from br.s -> br like opcodes */
7289 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   TRUE when IL address IP still belongs to basic block BB, i.e. no other
 *   bblock starts at that CIL offset.
 */
7292 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7294 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7296 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode and create
 *   basic blocks (via GET_BBLOCK) at every branch target and fall-through
 *   point, including all arms of switch instructions.  Also marks the
 *   bblock containing a CEE_THROW as out-of-line (cold).
 *   NOTE(review): several ip-advance statements, case labels and the
 *   closing of the decode loop are elided in this excerpt.
 */
7300 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7302 unsigned char *ip = start;
7303 unsigned char *target;
7306 MonoBasicBlock *bblock;
7307 const MonoOpcode *opcode;
7310 cli_addr = ip - start;
7311 i = mono_opcode_value ((const guint8 **)&ip, end);
7314 opcode = &mono_opcodes [i];
/* Advance ip by the operand size of each argument kind. */
7315 switch (opcode->argument) {
7316 case MonoInlineNone:
7319 case MonoInlineString:
7320 case MonoInlineType:
7321 case MonoInlineField:
7322 case MonoInlineMethod:
7325 case MonoShortInlineR:
7332 case MonoShortInlineVar:
7333 case MonoShortInlineI:
7336 case MonoShortInlineBrTarget:
7337 target = start + cli_addr + 2 + (signed char)ip [1];
7338 GET_BBLOCK (cfg, bblock, target);
7341 GET_BBLOCK (cfg, bblock, ip);
7343 case MonoInlineBrTarget:
7344 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7345 GET_BBLOCK (cfg, bblock, target);
7348 GET_BBLOCK (cfg, bblock, ip);
7350 case MonoInlineSwitch: {
7351 guint32 n = read32 (ip + 1);
7354 cli_addr += 5 + 4 * n;
7355 target = start + cli_addr;
7356 GET_BBLOCK (cfg, bblock, target);
7358 for (j = 0; j < n; ++j) {
7359 target = start + cli_addr + (gint32)read32 (ip);
7360 GET_BBLOCK (cfg, bblock, target);
7370 g_assert_not_reached ();
7373 if (i == CEE_THROW) {
7374 unsigned char *bb_start = ip - 1;
7376 /* Find the start of the bblock containing the throw */
7378 while ((bb_start >= start) && !bblock) {
7379 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7383 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve method TOKEN in the context of M.  For wrapper methods the
 *   method is taken from the wrapper data (and inflated when a generic
 *   CONTEXT is given); otherwise it is loaded from M's image.  Open
 *   constructed types are allowed here (contrast mini_get_method).
 *   NOTE(review): the result variable declaration, context NULL check and
 *   final return are elided in this excerpt.
 */
7393 static inline MonoMethod *
7394 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7398 mono_error_init (error);
7400 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7401 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7403 method = mono_class_inflate_generic_method_checked (method, context, error);
7406 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Wrapper around mini_get_method_allow_open that additionally rejects
 *   methods on open constructed types when not compiling gshared code,
 *   recording a bad-image error in cfg->error.  CFG may be NULL, in which
 *   case a local MonoError is used and any failure is swallowed.
 */
7412 static inline MonoMethod *
7413 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7416 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7418 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7419 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7423 if (!method && !cfg)
7424 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve class TOKEN relative to METHOD: from wrapper data (inflated
 *   with CONTEXT when generic) for wrappers, otherwise via typespec
 *   lookup on the method's image.  Initializes the class before returning.
 *   Errors are currently cleaned up and swallowed (see FIXMEs).
 *   NOTE(review): the result declaration, braces and final return are
 *   elided in this excerpt.
 */
7429 static inline MonoClass*
7430 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7435 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7436 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7438 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7439 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7442 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7443 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7446 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve a standalone signature TOKEN relative to METHOD: from wrapper
 *   data for wrappers, otherwise parsed from metadata, then inflated with
 *   CONTEXT.  Returns NULL with ERROR set on failure.
 *   NOTE(review): braces and the final return are elided in this excerpt.
 */
7450 static inline MonoMethodSignature*
7451 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7453 MonoMethodSignature *fsig;
7455 mono_error_init (error);
7456 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7457 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7459 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7460 return_val_if_nok (error, NULL);
7463 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *   Lazily look up and cache SecurityManager.ThrowException(Exception),
 *   used by emit_throw_exception below.
 *   NOTE(review): the return-type line, the NULL-check around the lookup
 *   and the final return are elided in this excerpt.
 */
7469 throw_exception (void)
7471 static MonoMethod *method = NULL;
7474 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7475 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   point, i.e. make the generated code throw EX at runtime.
 */
7482 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7484 MonoMethod *thrower = throw_exception ();
7487 EMIT_NEW_PCONST (cfg, args [0], ex);
7488 mono_emit_method_call (cfg, thrower, args, NULL);
7492 * Return the original method if a wrapper is specified. We can only access
7493 * the custom attributes from the original method.
7496 get_original_method (MonoMethod *method)
7498 if (method->wrapper_type == MONO_WRAPPER_NONE)
7501 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7502 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7505 /* in other cases we need to find the original method */
7506 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER (unwrapped, see get_original_method)
 *   may not access FIELD, emit code that throws the security exception.
 */
7510 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7512 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7513 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7515 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER (unwrapped) may not call CALLEE,
 *   emit code that throws the security exception.
 */
7519 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7521 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7522 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7524 emit_throw_exception (cfg, ex);
7528 * Check that the IL instructions at ip are the array initialization
7529 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical 'dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray' sequence that follows a newarr, and
 *   return a pointer to the field's RVA data (or, for AOT, the RVA itself
 *   boxed as a pointer) so the array can be initialized directly.
 *   OUT_SIZE receives the element size and OUT_FIELD_TOKEN the field token.
 *   NOTE(review): the element-size switch cases, the return-NULL paths
 *   and the dynamic-image branch are partially elided in this excerpt.
 */
7532 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7535 * newarr[System.Int32]
7537 * ldtoken field valuetype ...
7538 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7540 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7542 guint32 token = read32 (ip + 7);
7543 guint32 field_token = read32 (ip + 2);
7544 guint32 field_index = field_token & 0xffffff;
7546 const char *data_ptr;
7548 MonoMethod *cmethod;
7549 MonoClass *dummy_class;
7550 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7554 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7558 *out_field_token = field_token;
7560 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is RuntimeHelpers.InitializeArray from corlib. */
7563 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7565 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7566 case MONO_TYPE_BOOLEAN:
7570 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7571 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7572 case MONO_TYPE_CHAR:
7589 if (size > mono_type_size (field->type, &dummy_align))
7592 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7593 if (!image_is_dynamic (method->klass->image)) {
7594 field_index = read32 (ip + 2) & 0xffffff;
7595 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7596 data_ptr = mono_image_rva_map (method->klass->image, rva);
7597 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7598 /* for aot code we do the lookup on load */
7599 if (aot && data_ptr)
7600 return (const char *)GUINT_TO_POINTER (rva);
7602 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7604 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the offending IL
 *   at IP: disassembles the instruction when the header parses, otherwise
 *   reports the parse error or an empty method body.  The header is queued
 *   on cfg->headers_to_free rather than freed immediately.
 */
7612 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7615 char *method_fname = mono_method_full_name (method, TRUE);
7617 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7620 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7621 mono_error_cleanup (&error);
7622 } else if (header->code_size == 0)
7623 method_code = g_strdup ("method body is empty.");
7625 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7626 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7627 g_free (method_fname);
7628 g_free (method_code);
7629 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value *SP into local N.  When the
 *   store would be a plain reg-reg MOVE of a freshly emitted constant,
 *   retarget the constant's dreg to the local instead of emitting a move.
 *   NOTE(review): the return-type line and closing braces are not visible
 *   in this excerpt.
 */
7633 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7636 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7637 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7638 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7639 /* Optimize reg-reg moves away */
7641 * Can't optimize other opcodes, since sp[0] might point to
7642 * the last ins of a decomposed opcode.
7644 sp [0]->dreg = (cfg)->locals [n]->dreg;
7646 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for LDLOCA: when the address of a local is immediately
 * consumed by an INITOBJ in the same basic block, initialize the local
 * in place instead of materializing its address.
 * NOTE(review): the returned ip presumably skips the consumed opcodes on
 * success — the return statements are not visible in this chunk, confirm.
 */
7651 * ldloca inhibits many optimizations so try to get rid of it in common
7654 static inline unsigned char *
7655 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Local variable index operand of the ldloca. */
7665 local = read16 (ip + 2);
/* Pattern: ldloca <n>; initobj <token>, with the initobj in the current bblock. */
7669 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7670 /* From the INITOBJ case */
7671 token = read32 (ip + 2);
7672 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7673 CHECK_TYPELOAD (klass);
7674 type = mini_get_underlying_type (&klass->byval_arg);
/* Zero-initialize the local directly instead of taking its address. */
7675 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD (receiver and args in SP,
 * signature FSIG) in llvm-only mode, where vtable/IMT slots hold function
 * descriptors (address + argument pairs) instead of code addresses.
 * Chooses one of four strategies:
 *   1. plain virtual call through a lazily-initialized vtable slot;
 *   2. simple interface call through an IMT slot;
 *   3. generic-virtual / variant-interface call with an inline fast path and
 *      an icall slow path;
 *   4. fully generic resolver icall (the gsharedvt fallback).
 */
7683 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7685 MonoInst *icall_args [16];
7686 MonoInst *call_target, *ins, *vtable_ins;
7687 int arg_reg, this_reg, vtable_reg;
7688 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7689 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7690 gboolean variant_iface = FALSE;
7695 * In llvm-only mode, vtables contain function descriptors instead of
7696 * method addresses/trampolines.
/* The receiver must be non-null before its vtable can be loaded. */
7698 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods dispatch through IMT slots, others through vtable slots. */
7701 slot = mono_method_get_imt_slot (cmethod);
7703 slot = mono_method_get_vtable_index (cmethod);
7705 this_reg = sp [0]->dreg;
7707 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7708 variant_iface = TRUE;
7710 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7712 * The simplest case, a normal virtual call.
7714 int slot_reg = alloc_preg (cfg);
7715 int addr_reg = alloc_preg (cfg);
7716 int arg_reg = alloc_preg (cfg);
7717 MonoBasicBlock *non_null_bb;
7719 vtable_reg = alloc_preg (cfg);
7720 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7721 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7723 /* Load the vtable slot, which contains a function descriptor. */
7724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7726 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-zero) — branch over the init icall. */
7728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7729 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7733 // FIXME: Make the wrapper use the preserveall cconv
7734 // FIXME: Use one icall per slot for small slot numbers ?
/* Slow path: initialize the vtable slot via an icall. */
7735 icall_args [0] = vtable_ins;
7736 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7737 /* Make the icall return the vtable slot value to save some code space */
7738 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7739 ins->dreg = slot_reg;
7740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7743 MONO_START_BB (cfg, non_null_bb);
7744 /* Load the address + arg from the vtable slot */
7745 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7748 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7751 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7753 * A simple interface call
7755 * We make a call through an imt slot to obtain the function descriptor we need to call.
7756 * The imt slot contains a function descriptor for a runtime function + arg.
7758 int slot_reg = alloc_preg (cfg);
7759 int addr_reg = alloc_preg (cfg);
7760 int arg_reg = alloc_preg (cfg);
7761 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7763 vtable_reg = alloc_preg (cfg);
7764 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live below the vtable proper, hence the negative offset. */
7765 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7768 * The slot is already initialized when the vtable is created so there is no need
7772 /* Load the imt slot, which contains a function descriptor. */
7773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7775 /* Load the address + arg of the imt thunk from the imt slot */
7776 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7777 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7779 * IMT thunks in llvm-only mode are C functions which take an info argument
7780 * plus the imt method and return the ftndesc to call.
7782 icall_args [0] = thunk_arg_ins;
7783 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7784 cmethod, MONO_RGCTX_INFO_METHOD);
7785 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7787 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7790 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7792 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7793 * dynamically extended as more instantiations are discovered.
7794 * This handles generic virtual methods both on classes and interfaces.
7796 int slot_reg = alloc_preg (cfg);
7797 int addr_reg = alloc_preg (cfg);
7798 int arg_reg = alloc_preg (cfg);
7799 int ftndesc_reg = alloc_preg (cfg);
7800 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7801 MonoBasicBlock *slowpath_bb, *end_bb;
7803 NEW_BBLOCK (cfg, slowpath_bb);
7804 NEW_BBLOCK (cfg, end_bb);
7806 vtable_reg = alloc_preg (cfg);
7807 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface: negative IMT offset; class: positive vtable-slot offset. */
7809 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7811 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7813 /* Load the slot, which contains a function descriptor. */
7814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7816 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7817 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7818 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7822 /* Same as with iface calls */
7823 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7824 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7825 icall_args [0] = thunk_arg_ins;
7826 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7827 cmethod, MONO_RGCTX_INFO_METHOD);
7828 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7829 ftndesc_ins->dreg = ftndesc_reg;
7831 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7832 * they don't know about yet. Fall back to the slowpath in that case.
7834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7835 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7837 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the ftndesc through the runtime. */
7840 MONO_START_BB (cfg, slowpath_bb);
7841 icall_args [0] = vtable_ins;
7842 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7843 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7844 cmethod, MONO_RGCTX_INFO_METHOD);
7846 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7848 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7849 ftndesc_ins->dreg = ftndesc_reg;
7850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7853 MONO_START_BB (cfg, end_bb);
7854 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7858 * Non-optimized cases
/* Generic fallback: let a resolver icall compute target address + extra arg. */
7860 icall_args [0] = sp [0];
7861 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7863 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7864 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver writes the extra call argument here. */
7866 arg_reg = alloc_preg (cfg);
7867 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7868 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7870 g_assert (is_gsharedvt);
7872 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7874 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7877 * Pass the extra argument even if the callee doesn't receive it, most
7878 * calling conventions allow this.
7880 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking up the parent chain (the loop wrapper is elided in this chunk).
 */
7884 is_exception_class (MonoClass *klass)
7887 if (klass == mono_defaults.exception_class)
7889 klass = klass->parent;
7895 * is_jit_optimizer_disabled:
7897 * Determine whenever M's assembly has a DebuggableAttribute with the
7898 * IsJITOptimizerDisabled flag set.
7901 is_jit_optimizer_disabled (MonoMethod *m)
7904 MonoAssembly *ass = m->klass->image->assembly;
7905 MonoCustomAttrInfo* attrs;
7908 gboolean val = FALSE;
/* Fast path: result is cached per assembly. */
7911 if (ass->jit_optimizer_disabled_inited)
7912 return ass->jit_optimizer_disabled;
7914 klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute class available: cache FALSE.  The barrier
 * orders the value store before the inited-flag store for lock-free
 * readers of the fast path above. */
7918 ass->jit_optimizer_disabled = FALSE;
7919 mono_memory_barrier ();
7920 ass->jit_optimizer_disabled_inited = TRUE;
7924 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7925 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly's custom attributes for DebuggableAttribute. */
7927 for (i = 0; i < attrs->num_attrs; ++i) {
7928 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7930 MonoMethodSignature *sig;
7932 if (!attr->ctor || attr->ctor->klass != klass)
7934 /* Decode the attribute. See reflection.c */
7935 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
7936 g_assert (read16 (p) == 0x0001);
7939 // FIXME: Support named parameters
7940 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is recognized here. */
7941 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7943 /* Two boolean arguments */
7947 mono_custom_attrs_free (attrs);
/* Publish the computed value, then the inited flag (ordered by the barrier). */
7950 ass->jit_optimizer_disabled = val;
7951 mono_memory_barrier ();
7952 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail call from METHOD to CMETHOD (signature FSIG,
 * reached via CALL_OPCODE) may be emitted.  Starts from the arch-specific
 * check, then vetoes cases where the callee could observe the caller's
 * soon-to-be-dead stack frame, and a few structural restrictions.
 */
7958 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7960 gboolean supported_tail_call;
7963 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7965 for (i = 0; i < fsig->param_count; ++i) {
7966 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7967 /* These can point to the current method's stack */
7968 supported_tail_call = FALSE;
7970 if (fsig->hasthis && cmethod->klass->valuetype)
7971 /* this might point to the current method's stack */
7972 supported_tail_call = FALSE;
/* No tail calls into p/invokes, LMF-saving callers, or (most) wrappers. */
7973 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7974 supported_tail_call = FALSE;
7975 if (cfg->method->save_lmf)
7976 supported_tail_call = FALSE;
7977 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7978 supported_tail_call = FALSE;
/* Only plain CALL opcodes are supported (not calli/callvirt). */
7979 if (call_opcode != CEE_CALL)
7980 supported_tail_call = FALSE;
7982 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs. */
7984 if (supported_tail_call) {
7985 if (!mono_debug_count ())
7986 supported_tail_call = FALSE;
7990 return supported_tail_call;
7996 * Handle calls made to ctors from NEWOBJ opcodes.
7999 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8000 MonoInst **sp, guint8 *ip, int *inline_costs)
8002 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared-generic valuetype ctors need an explicit mrgctx/vtable argument. */
8004 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8005 mono_method_is_generic_sharable (cmethod, TRUE)) {
8006 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8007 mono_class_vtable (cfg->domain, cmethod->klass);
8008 CHECK_TYPELOAD (cmethod->klass);
8010 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8011 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8014 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8015 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8017 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8019 CHECK_TYPELOAD (cmethod->klass);
8020 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8025 /* Avoid virtual calls to ctors if possible */
8026 if (mono_class_is_marshalbyref (cmethod->klass))
8027 callvirt_this_arg = sp [0];
/* First choice: an intrinsic implementation of the ctor. */
8029 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8030 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8031 CHECK_CFG_EXCEPTION;
/* Second choice: inline the ctor body (never for Exception subclasses). */
8032 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8033 mono_method_check_inlining (cfg, cmethod) &&
8034 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8037 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8038 cfg->real_offset += 5;
8040 *inline_costs += costs - 5;
8042 INLINE_FAILURE ("inline failure");
8043 // FIXME-VT: Clean this up
8044 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8045 GSHAREDVT_FAILURE(*ip);
8046 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call indirectly through the gsharedvt-out trampoline. */
8048 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8051 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8053 if (cfg->llvm_only) {
8054 // FIXME: Avoid initializing vtable_arg
8055 emit_llvmonly_calli (cfg, fsig, sp, addr);
8057 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8059 } else if (context_used &&
8060 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8061 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8062 MonoInst *cmethod_addr;
8064 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8066 if (cfg->llvm_only) {
8067 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8068 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8069 emit_llvmonly_calli (cfg, fsig, sp, addr);
8071 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8072 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8074 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain (non-inlined) direct call to the ctor. */
8077 INLINE_FAILURE ("ctor call");
8078 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8079 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to return VAL from the current method: valuetype returns are
 * stored through the ret var or the hidden valuetype-return address, the
 * soft-float fallback routes R4 values through an icall, and everything
 * else goes to the arch-specific setret.
 */
8086 emit_setret (MonoCompile *cfg, MonoInst *val)
8088 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return (STOBJ): cannot be returned in a register. */
8091 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8094 if (!cfg->vret_addr) {
8095 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Store through the hidden valuetype-return address argument. */
8097 EMIT_NEW_RETLOADA (cfg, ret_addr);
8099 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8100 ins->klass = mono_class_from_mono_type (ret_type);
8103 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* On soft-float targets an R4 return value is converted via an icall. */
8104 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8105 MonoInst *iargs [1];
8109 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8110 mono_arch_emit_setret (cfg, cfg->method, conv);
8112 mono_arch_emit_setret (cfg, cfg->method, val);
8115 mono_arch_emit_setret (cfg, cfg->method, val);
8121 * mono_method_to_ir:
8123 * Translate the .net IL into linear IR.
8126 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8127 MonoInst *return_var, MonoInst **inline_args,
8128 guint inline_offset, gboolean is_virtual_call)
8131 MonoInst *ins, **sp, **stack_start;
8132 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8133 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8134 MonoMethod *cmethod, *method_definition;
8135 MonoInst **arg_array;
8136 MonoMethodHeader *header;
8138 guint32 token, ins_flag;
8140 MonoClass *constrained_class = NULL;
8141 unsigned char *ip, *end, *target, *err_pos;
8142 MonoMethodSignature *sig;
8143 MonoGenericContext *generic_context = NULL;
8144 MonoGenericContainer *generic_container = NULL;
8145 MonoType **param_types;
8146 int i, n, start_new_bblock, dreg;
8147 int num_calls = 0, inline_costs = 0;
8148 int breakpoint_id = 0;
8150 GSList *class_inits = NULL;
8151 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8153 gboolean init_locals, seq_points, skip_dead_blocks;
8154 gboolean sym_seq_points = FALSE;
8155 MonoDebugMethodInfo *minfo;
8156 MonoBitSet *seq_point_locs = NULL;
8157 MonoBitSet *seq_point_set_locs = NULL;
8159 cfg->disable_inline = is_jit_optimizer_disabled (method);
8161 /* serialization and xdomain stuff may need access to private fields and methods */
8162 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8163 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8164 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8165 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8166 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8167 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8169 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8170 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8171 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8172 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8173 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8175 image = method->klass->image;
8176 header = mono_method_get_header_checked (method, &cfg->error);
8178 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8179 goto exception_exit;
8181 generic_container = mono_method_get_generic_container (method);
8182 sig = mono_method_signature (method);
8183 num_args = sig->hasthis + sig->param_count;
8184 ip = (unsigned char*)header->code;
8185 cfg->cil_start = ip;
8186 end = ip + header->code_size;
8187 cfg->stat_cil_code_size += header->code_size;
8189 seq_points = cfg->gen_seq_points && cfg->method == method;
8191 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8192 /* We could hit a seq point before attaching to the JIT (#8338) */
8196 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8197 minfo = mono_debug_lookup_method (method);
8199 MonoSymSeqPoint *sps;
8200 int i, n_il_offsets;
8202 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8203 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8204 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8205 sym_seq_points = TRUE;
8206 for (i = 0; i < n_il_offsets; ++i) {
8207 if (sps [i].il_offset < header->code_size)
8208 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8211 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8212 /* Methods without line number info like auto-generated property accessors */
8213 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8214 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8215 sym_seq_points = TRUE;
8220 * Methods without init_locals set could cause asserts in various passes
8221 * (#497220). To work around this, we emit dummy initialization opcodes
8222 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8223 * on some platforms.
8225 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8226 init_locals = header->init_locals;
8230 method_definition = method;
8231 while (method_definition->is_inflated) {
8232 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8233 method_definition = imethod->declaring;
8236 /* SkipVerification is not allowed if core-clr is enabled */
8237 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8239 dont_verify_stloc = TRUE;
8242 if (sig->is_inflated)
8243 generic_context = mono_method_get_context (method);
8244 else if (generic_container)
8245 generic_context = &generic_container->context;
8246 cfg->generic_context = generic_context;
8249 g_assert (!sig->has_type_parameters);
8251 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8252 g_assert (method->is_inflated);
8253 g_assert (mono_method_get_context (method)->method_inst);
8255 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8256 g_assert (sig->generic_param_count);
8258 if (cfg->method == method) {
8259 cfg->real_offset = 0;
8261 cfg->real_offset = inline_offset;
8264 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8265 cfg->cil_offset_to_bb_len = header->code_size;
8267 cfg->current_method = method;
8269 if (cfg->verbose_level > 2)
8270 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8272 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8274 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8275 for (n = 0; n < sig->param_count; ++n)
8276 param_types [n + sig->hasthis] = sig->params [n];
8277 cfg->arg_types = param_types;
8279 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8280 if (cfg->method == method) {
8282 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8283 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8286 NEW_BBLOCK (cfg, start_bblock);
8287 cfg->bb_entry = start_bblock;
8288 start_bblock->cil_code = NULL;
8289 start_bblock->cil_length = 0;
8292 NEW_BBLOCK (cfg, end_bblock);
8293 cfg->bb_exit = end_bblock;
8294 end_bblock->cil_code = NULL;
8295 end_bblock->cil_length = 0;
8296 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8297 g_assert (cfg->num_bblocks == 2);
8299 arg_array = cfg->args;
8301 if (header->num_clauses) {
8302 cfg->spvars = g_hash_table_new (NULL, NULL);
8303 cfg->exvars = g_hash_table_new (NULL, NULL);
8305 /* handle exception clauses */
8306 for (i = 0; i < header->num_clauses; ++i) {
8307 MonoBasicBlock *try_bb;
8308 MonoExceptionClause *clause = &header->clauses [i];
8309 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8311 try_bb->real_offset = clause->try_offset;
8312 try_bb->try_start = TRUE;
8313 try_bb->region = ((i + 1) << 8) | clause->flags;
8314 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8315 tblock->real_offset = clause->handler_offset;
8316 tblock->flags |= BB_EXCEPTION_HANDLER;
8319 * Linking the try block with the EH block hinders inlining as we won't be able to
8320 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8322 if (COMPILE_LLVM (cfg))
8323 link_bblock (cfg, try_bb, tblock);
8325 if (*(ip + clause->handler_offset) == CEE_POP)
8326 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8328 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8329 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8330 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8331 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8332 MONO_ADD_INS (tblock, ins);
8334 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8335 /* finally clauses already have a seq point */
8336 /* seq points for filter clauses are emitted below */
8337 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8338 MONO_ADD_INS (tblock, ins);
8341 /* todo: is a fault block unsafe to optimize? */
8342 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8343 tblock->flags |= BB_EXCEPTION_UNSAFE;
8346 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8348 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8350 /* catch and filter blocks get the exception object on the stack */
8351 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8352 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8354 /* mostly like handle_stack_args (), but just sets the input args */
8355 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8356 tblock->in_scount = 1;
8357 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8358 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8362 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8363 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8364 if (!cfg->compile_llvm) {
8365 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8366 ins->dreg = tblock->in_stack [0]->dreg;
8367 MONO_ADD_INS (tblock, ins);
8370 MonoInst *dummy_use;
8373 * Add a dummy use for the exvar so its liveness info will be
8376 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8379 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8380 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8381 MONO_ADD_INS (tblock, ins);
8384 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8385 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8386 tblock->flags |= BB_EXCEPTION_HANDLER;
8387 tblock->real_offset = clause->data.filter_offset;
8388 tblock->in_scount = 1;
8389 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8390 /* The filter block shares the exvar with the handler block */
8391 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8392 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8393 MONO_ADD_INS (tblock, ins);
8397 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8398 clause->data.catch_class &&
8400 mono_class_check_context_used (clause->data.catch_class)) {
8402 * In shared generic code with catch
8403 * clauses containing type variables
8404 * the exception handling code has to
8405 * be able to get to the rgctx.
8406 * Therefore we have to make sure that
8407 * the vtable/mrgctx argument (for
8408 * static or generic methods) or the
8409 * "this" argument (for non-static
8410 * methods) are live.
8412 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8413 mini_method_get_context (method)->method_inst ||
8414 method->klass->valuetype) {
8415 mono_get_vtable_var (cfg);
8417 MonoInst *dummy_use;
8419 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8424 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8425 cfg->cbb = start_bblock;
8426 cfg->args = arg_array;
8427 mono_save_args (cfg, sig, inline_args);
8430 /* FIRST CODE BLOCK */
8431 NEW_BBLOCK (cfg, tblock);
8432 tblock->cil_code = ip;
8436 ADD_BBLOCK (cfg, tblock);
8438 if (cfg->method == method) {
8439 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8440 if (breakpoint_id) {
8441 MONO_INST_NEW (cfg, ins, OP_BREAK);
8442 MONO_ADD_INS (cfg->cbb, ins);
8446 /* we use a separate basic block for the initialization code */
8447 NEW_BBLOCK (cfg, init_localsbb);
8448 cfg->bb_init = init_localsbb;
8449 init_localsbb->real_offset = cfg->real_offset;
8450 start_bblock->next_bb = init_localsbb;
8451 init_localsbb->next_bb = cfg->cbb;
8452 link_bblock (cfg, start_bblock, init_localsbb);
8453 link_bblock (cfg, init_localsbb, cfg->cbb);
8455 cfg->cbb = init_localsbb;
8457 if (cfg->gsharedvt && cfg->method == method) {
8458 MonoGSharedVtMethodInfo *info;
8459 MonoInst *var, *locals_var;
8462 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8463 info->method = cfg->method;
8464 info->count_entries = 16;
8465 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8466 cfg->gsharedvt_info = info;
8468 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8469 /* prevent it from being register allocated */
8470 //var->flags |= MONO_INST_VOLATILE;
8471 cfg->gsharedvt_info_var = var;
8473 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8474 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8476 /* Allocate locals */
8477 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8478 /* prevent it from being register allocated */
8479 //locals_var->flags |= MONO_INST_VOLATILE;
8480 cfg->gsharedvt_locals_var = locals_var;
8482 dreg = alloc_ireg (cfg);
8483 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8485 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8486 ins->dreg = locals_var->dreg;
8488 MONO_ADD_INS (cfg->cbb, ins);
8489 cfg->gsharedvt_locals_var_ins = ins;
8491 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8494 ins->flags |= MONO_INST_INIT;
8498 if (mono_security_core_clr_enabled ()) {
8499 /* check if this is native code, e.g. an icall or a p/invoke */
8500 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8501 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8503 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8504 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8506 /* if this ia a native call then it can only be JITted from platform code */
8507 if ((icall || pinvk) && method->klass && method->klass->image) {
8508 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8509 MonoException *ex = icall ? mono_get_exception_security () :
8510 mono_get_exception_method_access ();
8511 emit_throw_exception (cfg, ex);
8518 CHECK_CFG_EXCEPTION;
8520 if (header->code_size == 0)
8523 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8528 if (cfg->method == method)
8529 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8531 for (n = 0; n < header->num_locals; ++n) {
8532 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8537 /* We force the vtable variable here for all shared methods
8538 for the possibility that they might show up in a stack
8539 trace where their exact instantiation is needed. */
8540 if (cfg->gshared && method == cfg->method) {
8541 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8542 mini_method_get_context (method)->method_inst ||
8543 method->klass->valuetype) {
8544 mono_get_vtable_var (cfg);
8546 /* FIXME: Is there a better way to do this?
8547 We need the variable live for the duration
8548 of the whole method. */
8549 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8553 /* add a check for this != NULL to inlined methods */
8554 if (is_virtual_call) {
8557 NEW_ARGLOAD (cfg, arg_ins, 0);
8558 MONO_ADD_INS (cfg->cbb, arg_ins);
8559 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8562 skip_dead_blocks = !dont_verify;
8563 if (skip_dead_blocks) {
8564 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8569 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8570 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8573 start_new_bblock = 0;
8575 if (cfg->method == method)
8576 cfg->real_offset = ip - header->code;
8578 cfg->real_offset = inline_offset;
8583 if (start_new_bblock) {
8584 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8585 if (start_new_bblock == 2) {
8586 g_assert (ip == tblock->cil_code);
8588 GET_BBLOCK (cfg, tblock, ip);
8590 cfg->cbb->next_bb = tblock;
8592 start_new_bblock = 0;
8593 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8594 if (cfg->verbose_level > 3)
8595 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8596 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8600 g_slist_free (class_inits);
8603 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8604 link_bblock (cfg, cfg->cbb, tblock);
8605 if (sp != stack_start) {
8606 handle_stack_args (cfg, stack_start, sp - stack_start);
8608 CHECK_UNVERIFIABLE (cfg);
8610 cfg->cbb->next_bb = tblock;
8612 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8613 if (cfg->verbose_level > 3)
8614 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8615 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8618 g_slist_free (class_inits);
8623 if (skip_dead_blocks) {
8624 int ip_offset = ip - header->code;
8626 if (ip_offset == bb->end)
8630 int op_size = mono_opcode_size (ip, end);
8631 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8633 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8635 if (ip_offset + op_size == bb->end) {
8636 MONO_INST_NEW (cfg, ins, OP_NOP);
8637 MONO_ADD_INS (cfg->cbb, ins);
8638 start_new_bblock = 1;
8646 * Sequence points are points where the debugger can place a breakpoint.
8647 * Currently, we generate these automatically at points where the IL
8650 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8652 * Make methods interruptible at the beginning, and at the targets of
8653 * backward branches.
8654 * Also, do this at the start of every bblock in methods with clauses too,
8655 * to be able to handle instructions with imprecise control flow like
8657 * Backward branches are handled at the end of method-to-ir ().
8659 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8660 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8662 /* Avoid sequence points on empty IL like .volatile */
8663 // FIXME: Enable this
8664 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8665 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8666 if ((sp != stack_start) && !sym_seq_point)
8667 ins->flags |= MONO_INST_NONEMPTY_STACK;
8668 MONO_ADD_INS (cfg->cbb, ins);
8671 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8674 cfg->cbb->real_offset = cfg->real_offset;
8676 if ((cfg->method == method) && cfg->coverage_info) {
8677 guint32 cil_offset = ip - header->code;
8678 cfg->coverage_info->data [cil_offset].cil_code = ip;
8680 /* TODO: Use an increment here */
8681 #if defined(TARGET_X86)
8682 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8683 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8685 MONO_ADD_INS (cfg->cbb, ins);
8687 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8692 if (cfg->verbose_level > 3)
8693 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8697 if (seq_points && !sym_seq_points && sp != stack_start) {
8699 * The C# compiler uses these nops to notify the JIT that it should
8700 * insert seq points.
8702 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8703 MONO_ADD_INS (cfg->cbb, ins);
8705 if (cfg->keep_cil_nops)
8706 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8708 MONO_INST_NEW (cfg, ins, OP_NOP);
8710 MONO_ADD_INS (cfg->cbb, ins);
8713 if (should_insert_brekpoint (cfg->method)) {
8714 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8716 MONO_INST_NEW (cfg, ins, OP_NOP);
8719 MONO_ADD_INS (cfg->cbb, ins);
8725 CHECK_STACK_OVF (1);
8726 n = (*ip)-CEE_LDARG_0;
8728 EMIT_NEW_ARGLOAD (cfg, ins, n);
8736 CHECK_STACK_OVF (1);
8737 n = (*ip)-CEE_LDLOC_0;
8739 EMIT_NEW_LOCLOAD (cfg, ins, n);
8748 n = (*ip)-CEE_STLOC_0;
8751 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8753 emit_stloc_ir (cfg, sp, header, n);
8760 CHECK_STACK_OVF (1);
8763 EMIT_NEW_ARGLOAD (cfg, ins, n);
8769 CHECK_STACK_OVF (1);
8772 NEW_ARGLOADA (cfg, ins, n);
8773 MONO_ADD_INS (cfg->cbb, ins);
8783 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8785 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8790 CHECK_STACK_OVF (1);
8793 EMIT_NEW_LOCLOAD (cfg, ins, n);
8797 case CEE_LDLOCA_S: {
8798 unsigned char *tmp_ip;
8800 CHECK_STACK_OVF (1);
8801 CHECK_LOCAL (ip [1]);
8803 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8809 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8818 CHECK_LOCAL (ip [1]);
8819 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8821 emit_stloc_ir (cfg, sp, header, ip [1]);
8826 CHECK_STACK_OVF (1);
8827 EMIT_NEW_PCONST (cfg, ins, NULL);
8828 ins->type = STACK_OBJ;
8833 CHECK_STACK_OVF (1);
8834 EMIT_NEW_ICONST (cfg, ins, -1);
8847 CHECK_STACK_OVF (1);
8848 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8854 CHECK_STACK_OVF (1);
8856 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8862 CHECK_STACK_OVF (1);
8863 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8869 CHECK_STACK_OVF (1);
8870 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8871 ins->type = STACK_I8;
8872 ins->dreg = alloc_dreg (cfg, STACK_I8);
8874 ins->inst_l = (gint64)read64 (ip);
8875 MONO_ADD_INS (cfg->cbb, ins);
8881 gboolean use_aotconst = FALSE;
8883 #ifdef TARGET_POWERPC
8884 /* FIXME: Clean this up */
8885 if (cfg->compile_aot)
8886 use_aotconst = TRUE;
8889 /* FIXME: we should really allocate this only late in the compilation process */
8890 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8892 CHECK_STACK_OVF (1);
8898 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8900 dreg = alloc_freg (cfg);
8901 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8902 ins->type = cfg->r4_stack_type;
8904 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8905 ins->type = cfg->r4_stack_type;
8906 ins->dreg = alloc_dreg (cfg, STACK_R8);
8908 MONO_ADD_INS (cfg->cbb, ins);
8918 gboolean use_aotconst = FALSE;
8920 #ifdef TARGET_POWERPC
8921 /* FIXME: Clean this up */
8922 if (cfg->compile_aot)
8923 use_aotconst = TRUE;
8926 /* FIXME: we should really allocate this only late in the compilation process */
8927 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8929 CHECK_STACK_OVF (1);
8935 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8937 dreg = alloc_freg (cfg);
8938 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8939 ins->type = STACK_R8;
8941 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8942 ins->type = STACK_R8;
8943 ins->dreg = alloc_dreg (cfg, STACK_R8);
8945 MONO_ADD_INS (cfg->cbb, ins);
8954 MonoInst *temp, *store;
8956 CHECK_STACK_OVF (1);
8960 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8961 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8963 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8966 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8979 if (sp [0]->type == STACK_R8)
8980 /* we need to pop the value from the x86 FP stack */
8981 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8986 MonoMethodSignature *fsig;
8989 INLINE_FAILURE ("jmp");
8990 GSHAREDVT_FAILURE (*ip);
8993 if (stack_start != sp)
8995 token = read32 (ip + 1);
8996 /* FIXME: check the signature matches */
8997 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9000 if (cfg->gshared && mono_method_check_context_used (cmethod))
9001 GENERIC_SHARING_FAILURE (CEE_JMP);
9003 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9005 fsig = mono_method_signature (cmethod);
9006 n = fsig->param_count + fsig->hasthis;
9007 if (cfg->llvm_only) {
9010 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9011 for (i = 0; i < n; ++i)
9012 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9013 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9015 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9016 * have to emit a normal return since llvm expects it.
9019 emit_setret (cfg, ins);
9020 MONO_INST_NEW (cfg, ins, OP_BR);
9021 ins->inst_target_bb = end_bblock;
9022 MONO_ADD_INS (cfg->cbb, ins);
9023 link_bblock (cfg, cfg->cbb, end_bblock);
9026 } else if (cfg->backend->have_op_tail_call) {
9027 /* Handle tail calls similarly to calls */
9030 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9031 call->method = cmethod;
9032 call->tail_call = TRUE;
9033 call->signature = mono_method_signature (cmethod);
9034 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9035 call->inst.inst_p0 = cmethod;
9036 for (i = 0; i < n; ++i)
9037 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9039 mono_arch_emit_call (cfg, call);
9040 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9041 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9043 for (i = 0; i < num_args; ++i)
9044 /* Prevent arguments from being optimized away */
9045 arg_array [i]->flags |= MONO_INST_VOLATILE;
9047 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9048 ins = (MonoInst*)call;
9049 ins->inst_p0 = cmethod;
9050 MONO_ADD_INS (cfg->cbb, ins);
9054 start_new_bblock = 1;
9059 MonoMethodSignature *fsig;
9062 token = read32 (ip + 1);
9066 //GSHAREDVT_FAILURE (*ip);
9071 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9074 if (method->dynamic && fsig->pinvoke) {
9078 * This is a call through a function pointer using a pinvoke
9079 * signature. Have to create a wrapper and call that instead.
9080 * FIXME: This is very slow, need to create a wrapper at JIT time
9081 * instead based on the signature.
9083 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9084 EMIT_NEW_PCONST (cfg, args [1], fsig);
9086 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9089 n = fsig->param_count + fsig->hasthis;
9093 //g_assert (!virtual_ || fsig->hasthis);
9097 inline_costs += 10 * num_calls++;
9100 * Making generic calls out of gsharedvt methods.
9101 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9102 * patching gshared method addresses into a gsharedvt method.
9104 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9106 * We pass the address to the gsharedvt trampoline in the rgctx reg
9108 MonoInst *callee = addr;
9110 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9112 GSHAREDVT_FAILURE (*ip);
9116 GSHAREDVT_FAILURE (*ip);
9118 addr = emit_get_rgctx_sig (cfg, context_used,
9119 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9120 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9124 /* Prevent inlining of methods with indirect calls */
9125 INLINE_FAILURE ("indirect call");
9127 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9128 MonoJumpInfoType info_type;
9132 * Instead of emitting an indirect call, emit a direct call
9133 * with the contents of the aotconst as the patch info.
9135 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9136 info_type = (MonoJumpInfoType)addr->inst_c1;
9137 info_data = addr->inst_p0;
9139 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9140 info_data = addr->inst_right->inst_left;
9143 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9144 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9147 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9148 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9153 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9157 /* End of call, INS should contain the result of the call, if any */
9159 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9161 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9164 CHECK_CFG_EXCEPTION;
9168 constrained_class = NULL;
9172 case CEE_CALLVIRT: {
9173 MonoInst *addr = NULL;
9174 MonoMethodSignature *fsig = NULL;
9176 int virtual_ = *ip == CEE_CALLVIRT;
9177 gboolean pass_imt_from_rgctx = FALSE;
9178 MonoInst *imt_arg = NULL;
9179 MonoInst *keep_this_alive = NULL;
9180 gboolean pass_vtable = FALSE;
9181 gboolean pass_mrgctx = FALSE;
9182 MonoInst *vtable_arg = NULL;
9183 gboolean check_this = FALSE;
9184 gboolean supported_tail_call = FALSE;
9185 gboolean tail_call = FALSE;
9186 gboolean need_seq_point = FALSE;
9187 guint32 call_opcode = *ip;
9188 gboolean emit_widen = TRUE;
9189 gboolean push_res = TRUE;
9190 gboolean skip_ret = FALSE;
9191 gboolean delegate_invoke = FALSE;
9192 gboolean direct_icall = FALSE;
9193 gboolean constrained_partial_call = FALSE;
9194 MonoMethod *cil_method;
9197 token = read32 (ip + 1);
9201 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9204 cil_method = cmethod;
9206 if (constrained_class) {
9207 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9208 if (!mini_is_gsharedvt_klass (constrained_class)) {
9209 g_assert (!cmethod->klass->valuetype);
9210 if (!mini_type_is_reference (&constrained_class->byval_arg))
9211 constrained_partial_call = TRUE;
9215 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9216 if (cfg->verbose_level > 2)
9217 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9218 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9219 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9221 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9225 if (cfg->verbose_level > 2)
9226 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9228 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9230 * This is needed since get_method_constrained can't find
9231 * the method in klass representing a type var.
9232 * The type var is guaranteed to be a reference type in this
9235 if (!mini_is_gsharedvt_klass (constrained_class))
9236 g_assert (!cmethod->klass->valuetype);
9238 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9244 if (!dont_verify && !cfg->skip_visibility) {
9245 MonoMethod *target_method = cil_method;
9246 if (method->is_inflated) {
9247 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9250 if (!mono_method_can_access_method (method_definition, target_method) &&
9251 !mono_method_can_access_method (method, cil_method))
9252 emit_method_access_failure (cfg, method, cil_method);
9255 if (mono_security_core_clr_enabled ())
9256 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9258 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9259 /* MS.NET seems to silently convert this to a callvirt */
9264 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9265 * converts to a callvirt.
9267 * tests/bug-515884.il is an example of this behavior
9269 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9270 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9271 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9275 if (!cmethod->klass->inited)
9276 if (!mono_class_init (cmethod->klass))
9277 TYPE_LOAD_ERROR (cmethod->klass);
9279 fsig = mono_method_signature (cmethod);
9282 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9283 mini_class_is_system_array (cmethod->klass)) {
9284 array_rank = cmethod->klass->rank;
9285 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9286 direct_icall = TRUE;
9287 } else if (fsig->pinvoke) {
9288 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9289 fsig = mono_method_signature (wrapper);
9290 } else if (constrained_class) {
9292 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9296 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9297 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9299 /* See code below */
9300 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9301 MonoBasicBlock *tbb;
9303 GET_BBLOCK (cfg, tbb, ip + 5);
9304 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9306 * We want to extend the try block to cover the call, but we can't do it if the
9307 * call is made directly since it's followed by an exception check.
9309 direct_icall = FALSE;
9313 mono_save_token_info (cfg, image, token, cil_method);
9315 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9316 need_seq_point = TRUE;
9318 /* Don't support calls made using type arguments for now */
9320 if (cfg->gsharedvt) {
9321 if (mini_is_gsharedvt_signature (fsig))
9322 GSHAREDVT_FAILURE (*ip);
9326 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9327 g_assert_not_reached ();
9329 n = fsig->param_count + fsig->hasthis;
9331 if (!cfg->gshared && cmethod->klass->generic_container)
9335 g_assert (!mono_method_check_context_used (cmethod));
9339 //g_assert (!virtual_ || fsig->hasthis);
9344 * We have the `constrained.' prefix opcode.
9346 if (constrained_class) {
9347 if (mini_is_gsharedvt_klass (constrained_class)) {
9348 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9349 /* The 'Own method' case below */
9350 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9351 /* 'The type parameter is instantiated as a reference type' case below. */
9353 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9354 CHECK_CFG_EXCEPTION;
9360 if (constrained_partial_call) {
9361 gboolean need_box = TRUE;
9364 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9365 * called method is not known at compile time either. The called method could end up being
9366 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9367 * to box the receiver.
9368 * A simple solution would be to box always and make a normal virtual call, but that would
9369 * be bad performance wise.
9371 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9373 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9378 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9379 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9381 ins->klass = constrained_class;
9382 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9383 CHECK_CFG_EXCEPTION;
9384 } else if (need_box) {
9386 MonoBasicBlock *is_ref_bb, *end_bb;
9387 MonoInst *nonbox_call;
9390 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9392 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9393 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9395 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9397 NEW_BBLOCK (cfg, is_ref_bb);
9398 NEW_BBLOCK (cfg, end_bb);
9400 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9405 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9407 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9410 MONO_START_BB (cfg, is_ref_bb);
9411 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9412 ins->klass = constrained_class;
9413 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9414 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9416 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9418 MONO_START_BB (cfg, end_bb);
9421 nonbox_call->dreg = ins->dreg;
9424 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9425 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9426 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9429 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9431 * The type parameter is instantiated as a valuetype,
9432 * but that type doesn't override the method we're
9433 * calling, so we need to box `this'.
9435 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9436 ins->klass = constrained_class;
9437 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9438 CHECK_CFG_EXCEPTION;
9439 } else if (!constrained_class->valuetype) {
9440 int dreg = alloc_ireg_ref (cfg);
9443 * The type parameter is instantiated as a reference
9444 * type. We have a managed pointer on the stack, so
9445 * we need to dereference it here.
9447 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9448 ins->type = STACK_OBJ;
9451 if (cmethod->klass->valuetype) {
9454 /* Interface method */
9457 mono_class_setup_vtable (constrained_class);
9458 CHECK_TYPELOAD (constrained_class);
9459 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9461 TYPE_LOAD_ERROR (constrained_class);
9462 slot = mono_method_get_vtable_slot (cmethod);
9464 TYPE_LOAD_ERROR (cmethod->klass);
9465 cmethod = constrained_class->vtable [ioffset + slot];
9467 if (cmethod->klass == mono_defaults.enum_class) {
9468 /* Enum implements some interfaces, so treat this as the first case */
9469 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9470 ins->klass = constrained_class;
9471 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9472 CHECK_CFG_EXCEPTION;
9477 constrained_class = NULL;
9480 if (check_call_signature (cfg, fsig, sp))
9483 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9484 delegate_invoke = TRUE;
9486 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9487 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9488 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9496 * If the callee is a shared method, then its static cctor
9497 * might not get called after the call was patched.
9499 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9500 emit_class_init (cfg, cmethod->klass);
9501 CHECK_TYPELOAD (cmethod->klass);
9504 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9507 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9509 context_used = mini_method_check_context_used (cfg, cmethod);
9511 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9512 /* Generic method interface
9513 calls are resolved via a
9514 helper function and don't
9516 if (!cmethod_context || !cmethod_context->method_inst)
9517 pass_imt_from_rgctx = TRUE;
9521 * If a shared method calls another
9522 * shared method then the caller must
9523 * have a generic sharing context
9524 * because the magic trampoline
9525 * requires it. FIXME: We shouldn't
9526 * have to force the vtable/mrgctx
9527 * variable here. Instead there
9528 * should be a flag in the cfg to
9529 * request a generic sharing context.
9532 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9533 mono_get_vtable_var (cfg);
9538 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9540 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9542 CHECK_TYPELOAD (cmethod->klass);
9543 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9548 g_assert (!vtable_arg);
9550 if (!cfg->compile_aot) {
9552 * emit_get_rgctx_method () calls mono_class_vtable () so check
9553 * for type load errors before.
9555 mono_class_setup_vtable (cmethod->klass);
9556 CHECK_TYPELOAD (cmethod->klass);
9559 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9561 /* !marshalbyref is needed to properly handle generic methods + remoting */
9562 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9563 MONO_METHOD_IS_FINAL (cmethod)) &&
9564 !mono_class_is_marshalbyref (cmethod->klass)) {
9571 if (pass_imt_from_rgctx) {
9572 g_assert (!pass_vtable);
9574 imt_arg = emit_get_rgctx_method (cfg, context_used,
9575 cmethod, MONO_RGCTX_INFO_METHOD);
9579 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9581 /* Calling virtual generic methods */
9582 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9583 !(MONO_METHOD_IS_FINAL (cmethod) &&
9584 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9585 fsig->generic_param_count &&
9586 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9588 MonoInst *this_temp, *this_arg_temp, *store;
9589 MonoInst *iargs [4];
9591 g_assert (fsig->is_inflated);
9593 /* Prevent inlining of methods that contain indirect calls */
9594 INLINE_FAILURE ("virtual generic call");
9596 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9597 GSHAREDVT_FAILURE (*ip);
9599 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9600 g_assert (!imt_arg);
9602 g_assert (cmethod->is_inflated);
9603 imt_arg = emit_get_rgctx_method (cfg, context_used,
9604 cmethod, MONO_RGCTX_INFO_METHOD);
9605 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9607 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9608 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9609 MONO_ADD_INS (cfg->cbb, store);
9611 /* FIXME: This should be a managed pointer */
9612 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9614 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9615 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9616 cmethod, MONO_RGCTX_INFO_METHOD);
9617 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9618 addr = mono_emit_jit_icall (cfg,
9619 mono_helper_compile_generic_method, iargs);
9621 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9623 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9630 * Implement a workaround for the inherent races involved in locking:
9636 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9637 * try block, the Exit () won't be executed, see:
9638 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9639 * To work around this, we extend such try blocks to include the last x bytes
9640 * of the Monitor.Enter () call.
9642 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9643 MonoBasicBlock *tbb;
9645 GET_BBLOCK (cfg, tbb, ip + 5);
9647 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9648 * from Monitor.Enter like ArgumentNullException.
9650 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9651 /* Mark this bblock as needing to be extended */
9652 tbb->extend_try_block = TRUE;
9656 /* Conversion to a JIT intrinsic */
9657 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9658 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9659 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9667 if ((cfg->opt & MONO_OPT_INLINE) &&
9668 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9669 mono_method_check_inlining (cfg, cmethod)) {
9671 gboolean always = FALSE;
9673 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9674 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9675 /* Prevent inlining of methods that call wrappers */
9676 INLINE_FAILURE ("wrapper call");
9677 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9681 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9683 cfg->real_offset += 5;
9685 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9686 /* *sp is already set by inline_method */
9691 inline_costs += costs;
9697 /* Tail recursion elimination */
9698 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9699 gboolean has_vtargs = FALSE;
9702 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9703 INLINE_FAILURE ("tail call");
9705 /* keep it simple */
9706 for (i = fsig->param_count - 1; i >= 0; i--) {
9707 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9712 for (i = 0; i < n; ++i)
9713 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9714 MONO_INST_NEW (cfg, ins, OP_BR);
9715 MONO_ADD_INS (cfg->cbb, ins);
9716 tblock = start_bblock->out_bb [0];
9717 link_bblock (cfg, cfg->cbb, tblock);
9718 ins->inst_target_bb = tblock;
9719 start_new_bblock = 1;
9721 /* skip the CEE_RET, too */
9722 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9729 inline_costs += 10 * num_calls++;
9732 * Making generic calls out of gsharedvt methods.
9733 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9734 * patching gshared method addresses into a gsharedvt method.
9736 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9737 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9738 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9739 MonoRgctxInfoType info_type;
9742 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9743 //GSHAREDVT_FAILURE (*ip);
9744 // disable for possible remoting calls
9745 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9746 GSHAREDVT_FAILURE (*ip);
9747 if (fsig->generic_param_count) {
9748 /* virtual generic call */
9749 g_assert (!imt_arg);
9750 /* Same as the virtual generic case above */
9751 imt_arg = emit_get_rgctx_method (cfg, context_used,
9752 cmethod, MONO_RGCTX_INFO_METHOD);
9753 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9755 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9756 /* This can happen when we call a fully instantiated iface method */
9757 imt_arg = emit_get_rgctx_method (cfg, context_used,
9758 cmethod, MONO_RGCTX_INFO_METHOD);
9763 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9764 keep_this_alive = sp [0];
9766 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9767 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9769 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9770 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9772 if (cfg->llvm_only) {
9773 // FIXME: Avoid initializing vtable_arg
9774 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9776 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9781 /* Generic sharing */
9784 * Use this if the callee is gsharedvt sharable too, since
9785 * at runtime we might find an instantiation so the call cannot
9786 * be patched (the 'no_patch' code path in mini-trampolines.c).
9788 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9789 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9790 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9791 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9792 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9793 INLINE_FAILURE ("gshared");
9795 g_assert (cfg->gshared && cmethod);
9799 * We are compiling a call to a
9800 * generic method from shared code,
9801 * which means that we have to look up
9802 * the method in the rgctx and do an
9806 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9808 if (cfg->llvm_only) {
9809 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9810 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9812 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9813 // FIXME: Avoid initializing imt_arg/vtable_arg
9814 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9816 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9817 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9822 /* Direct calls to icalls */
9824 MonoMethod *wrapper;
9827 /* Inline the wrapper */
9828 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9830 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9831 g_assert (costs > 0);
9832 cfg->real_offset += 5;
9834 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9835 /* *sp is already set by inline_method */
9840 inline_costs += costs;
9849 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9850 MonoInst *val = sp [fsig->param_count];
9852 if (val->type == STACK_OBJ) {
9853 MonoInst *iargs [2];
9858 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9861 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9862 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9863 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9864 emit_write_barrier (cfg, addr, val);
9865 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9866 GSHAREDVT_FAILURE (*ip);
9867 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9868 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9870 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9871 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9872 if (!cmethod->klass->element_class->valuetype && !readonly)
9873 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9874 CHECK_TYPELOAD (cmethod->klass);
9877 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9880 g_assert_not_reached ();
9887 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9891 /* Tail prefix / tail call optimization */
9893 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9894 /* FIXME: runtime generic context pointer for jumps? */
9895 /* FIXME: handle this for generic sharing eventually */
9896 if ((ins_flag & MONO_INST_TAILCALL) &&
9897 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9898 supported_tail_call = TRUE;
9900 if (supported_tail_call) {
9903 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9904 INLINE_FAILURE ("tail call");
9906 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9908 if (cfg->backend->have_op_tail_call) {
9909 /* Handle tail calls similarly to normal calls */
9912 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9914 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9915 call->tail_call = TRUE;
9916 call->method = cmethod;
9917 call->signature = mono_method_signature (cmethod);
9920 * We implement tail calls by storing the actual arguments into the
9921 * argument variables, then emitting a CEE_JMP.
9923 for (i = 0; i < n; ++i) {
9924 /* Prevent argument from being register allocated */
9925 arg_array [i]->flags |= MONO_INST_VOLATILE;
9926 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9928 ins = (MonoInst*)call;
9929 ins->inst_p0 = cmethod;
9930 ins->inst_p1 = arg_array [0];
9931 MONO_ADD_INS (cfg->cbb, ins);
9932 link_bblock (cfg, cfg->cbb, end_bblock);
9933 start_new_bblock = 1;
9935 // FIXME: Eliminate unreachable epilogs
9938 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9939 * only reachable from this call.
9941 GET_BBLOCK (cfg, tblock, ip + 5);
9942 if (tblock == cfg->cbb || tblock->in_count == 0)
9951 * Synchronized wrappers.
9952 * Its hard to determine where to replace a method with its synchronized
9953 * wrapper without causing an infinite recursion. The current solution is
9954 * to add the synchronized wrapper in the trampolines, and to
9955 * change the called method to a dummy wrapper, and resolve that wrapper
9956 * to the real method in mono_jit_compile_method ().
9958 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9959 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9960 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9961 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9965 * Virtual calls in llvm-only mode.
9967 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9968 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9973 INLINE_FAILURE ("call");
9974 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9975 imt_arg, vtable_arg);
9977 if (tail_call && !cfg->llvm_only) {
9978 link_bblock (cfg, cfg->cbb, end_bblock);
9979 start_new_bblock = 1;
9981 // FIXME: Eliminate unreachable epilogs
9984 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9985 * only reachable from this call.
9987 GET_BBLOCK (cfg, tblock, ip + 5);
9988 if (tblock == cfg->cbb || tblock->in_count == 0)
9995 /* End of call, INS should contain the result of the call, if any */
9997 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10000 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10005 if (keep_this_alive) {
10006 MonoInst *dummy_use;
10008 /* See mono_emit_method_call_full () */
10009 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10012 CHECK_CFG_EXCEPTION;
10016 g_assert (*ip == CEE_RET);
10020 constrained_class = NULL;
10021 if (need_seq_point)
10022 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10026 if (cfg->method != method) {
10027 /* return from inlined method */
10029 * If in_count == 0, that means the ret is unreachable due to
10030 * being preceeded by a throw. In that case, inline_method () will
10031 * handle setting the return value
10032 * (test case: test_0_inline_throw ()).
10034 if (return_var && cfg->cbb->in_count) {
10035 MonoType *ret_type = mono_method_signature (method)->ret;
10041 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10044 //g_assert (returnvar != -1);
10045 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10046 cfg->ret_var_set = TRUE;
10049 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10051 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10052 emit_pop_lmf (cfg);
10055 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10057 if (seq_points && !sym_seq_points) {
10059 * Place a seq point here too even through the IL stack is not
10060 * empty, so a step over on
10063 * will work correctly.
10065 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10066 MONO_ADD_INS (cfg->cbb, ins);
10069 g_assert (!return_var);
10073 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10076 emit_setret (cfg, *sp);
10079 if (sp != stack_start)
10081 MONO_INST_NEW (cfg, ins, OP_BR);
10083 ins->inst_target_bb = end_bblock;
10084 MONO_ADD_INS (cfg->cbb, ins);
10085 link_bblock (cfg, cfg->cbb, end_bblock);
10086 start_new_bblock = 1;
10090 MONO_INST_NEW (cfg, ins, OP_BR);
10092 target = ip + 1 + (signed char)(*ip);
10094 GET_BBLOCK (cfg, tblock, target);
10095 link_bblock (cfg, cfg->cbb, tblock);
10096 ins->inst_target_bb = tblock;
10097 if (sp != stack_start) {
10098 handle_stack_args (cfg, stack_start, sp - stack_start);
10100 CHECK_UNVERIFIABLE (cfg);
10102 MONO_ADD_INS (cfg->cbb, ins);
10103 start_new_bblock = 1;
10104 inline_costs += BRANCH_COST;
10118 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10120 target = ip + 1 + *(signed char*)ip;
10123 ADD_BINCOND (NULL);
10126 inline_costs += BRANCH_COST;
10130 MONO_INST_NEW (cfg, ins, OP_BR);
10133 target = ip + 4 + (gint32)read32(ip);
10135 GET_BBLOCK (cfg, tblock, target);
10136 link_bblock (cfg, cfg->cbb, tblock);
10137 ins->inst_target_bb = tblock;
10138 if (sp != stack_start) {
10139 handle_stack_args (cfg, stack_start, sp - stack_start);
10141 CHECK_UNVERIFIABLE (cfg);
10144 MONO_ADD_INS (cfg->cbb, ins);
10146 start_new_bblock = 1;
10147 inline_costs += BRANCH_COST;
10149 case CEE_BRFALSE_S:
10154 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10155 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10156 guint32 opsize = is_short ? 1 : 4;
10158 CHECK_OPSIZE (opsize);
10160 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10163 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10168 GET_BBLOCK (cfg, tblock, target);
10169 link_bblock (cfg, cfg->cbb, tblock);
10170 GET_BBLOCK (cfg, tblock, ip);
10171 link_bblock (cfg, cfg->cbb, tblock);
10173 if (sp != stack_start) {
10174 handle_stack_args (cfg, stack_start, sp - stack_start);
10175 CHECK_UNVERIFIABLE (cfg);
10178 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10179 cmp->sreg1 = sp [0]->dreg;
10180 type_from_op (cfg, cmp, sp [0], NULL);
10183 #if SIZEOF_REGISTER == 4
10184 if (cmp->opcode == OP_LCOMPARE_IMM) {
10185 /* Convert it to OP_LCOMPARE */
10186 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10187 ins->type = STACK_I8;
10188 ins->dreg = alloc_dreg (cfg, STACK_I8);
10190 MONO_ADD_INS (cfg->cbb, ins);
10191 cmp->opcode = OP_LCOMPARE;
10192 cmp->sreg2 = ins->dreg;
10195 MONO_ADD_INS (cfg->cbb, cmp);
10197 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10198 type_from_op (cfg, ins, sp [0], NULL);
10199 MONO_ADD_INS (cfg->cbb, ins);
10200 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10201 GET_BBLOCK (cfg, tblock, target);
10202 ins->inst_true_bb = tblock;
10203 GET_BBLOCK (cfg, tblock, ip);
10204 ins->inst_false_bb = tblock;
10205 start_new_bblock = 2;
10208 inline_costs += BRANCH_COST;
10223 MONO_INST_NEW (cfg, ins, *ip);
10225 target = ip + 4 + (gint32)read32(ip);
10228 ADD_BINCOND (NULL);
10231 inline_costs += BRANCH_COST;
10235 MonoBasicBlock **targets;
10236 MonoBasicBlock *default_bblock;
10237 MonoJumpInfoBBTable *table;
10238 int offset_reg = alloc_preg (cfg);
10239 int target_reg = alloc_preg (cfg);
10240 int table_reg = alloc_preg (cfg);
10241 int sum_reg = alloc_preg (cfg);
10242 gboolean use_op_switch;
10246 n = read32 (ip + 1);
10249 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10253 CHECK_OPSIZE (n * sizeof (guint32));
10254 target = ip + n * sizeof (guint32);
10256 GET_BBLOCK (cfg, default_bblock, target);
10257 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10259 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10260 for (i = 0; i < n; ++i) {
10261 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10262 targets [i] = tblock;
10263 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10267 if (sp != stack_start) {
10269 * Link the current bb with the targets as well, so handle_stack_args
10270 * will set their in_stack correctly.
10272 link_bblock (cfg, cfg->cbb, default_bblock);
10273 for (i = 0; i < n; ++i)
10274 link_bblock (cfg, cfg->cbb, targets [i]);
10276 handle_stack_args (cfg, stack_start, sp - stack_start);
10278 CHECK_UNVERIFIABLE (cfg);
10280 /* Undo the links */
10281 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10282 for (i = 0; i < n; ++i)
10283 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10289 for (i = 0; i < n; ++i)
10290 link_bblock (cfg, cfg->cbb, targets [i]);
10292 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10293 table->table = targets;
10294 table->table_size = n;
10296 use_op_switch = FALSE;
10298 /* ARM implements SWITCH statements differently */
10299 /* FIXME: Make it use the generic implementation */
10300 if (!cfg->compile_aot)
10301 use_op_switch = TRUE;
10304 if (COMPILE_LLVM (cfg))
10305 use_op_switch = TRUE;
10307 cfg->cbb->has_jump_table = 1;
10309 if (use_op_switch) {
10310 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10311 ins->sreg1 = src1->dreg;
10312 ins->inst_p0 = table;
10313 ins->inst_many_bb = targets;
10314 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10315 MONO_ADD_INS (cfg->cbb, ins);
10317 if (sizeof (gpointer) == 8)
10318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10322 #if SIZEOF_REGISTER == 8
10323 /* The upper word might not be zero, and we add it to a 64 bit address later */
10324 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10327 if (cfg->compile_aot) {
10328 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10330 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10331 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10332 ins->inst_p0 = table;
10333 ins->dreg = table_reg;
10334 MONO_ADD_INS (cfg->cbb, ins);
10337 /* FIXME: Use load_memindex */
10338 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10340 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10342 start_new_bblock = 1;
10343 inline_costs += (BRANCH_COST * 2);
10356 case CEE_LDIND_REF:
10363 dreg = alloc_freg (cfg);
10366 dreg = alloc_lreg (cfg);
10368 case CEE_LDIND_REF:
10369 dreg = alloc_ireg_ref (cfg);
10372 dreg = alloc_preg (cfg);
10375 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10376 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10377 if (*ip == CEE_LDIND_R4)
10378 ins->type = cfg->r4_stack_type;
10379 ins->flags |= ins_flag;
10380 MONO_ADD_INS (cfg->cbb, ins);
10382 if (ins_flag & MONO_INST_VOLATILE) {
10383 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10384 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10389 case CEE_STIND_REF:
10400 if (ins_flag & MONO_INST_VOLATILE) {
10401 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10402 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10405 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10406 ins->flags |= ins_flag;
10409 MONO_ADD_INS (cfg->cbb, ins);
10411 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10412 emit_write_barrier (cfg, sp [0], sp [1]);
10421 MONO_INST_NEW (cfg, ins, (*ip));
10423 ins->sreg1 = sp [0]->dreg;
10424 ins->sreg2 = sp [1]->dreg;
10425 type_from_op (cfg, ins, sp [0], sp [1]);
10427 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10429 /* Use the immediate opcodes if possible */
10430 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10431 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10432 if (imm_opcode != -1) {
10433 ins->opcode = imm_opcode;
10434 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10437 NULLIFY_INS (sp [1]);
10441 MONO_ADD_INS ((cfg)->cbb, (ins));
10443 *sp++ = mono_decompose_opcode (cfg, ins);
10460 MONO_INST_NEW (cfg, ins, (*ip));
10462 ins->sreg1 = sp [0]->dreg;
10463 ins->sreg2 = sp [1]->dreg;
10464 type_from_op (cfg, ins, sp [0], sp [1]);
10466 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10467 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10469 /* FIXME: Pass opcode to is_inst_imm */
10471 /* Use the immediate opcodes if possible */
10472 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10473 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10474 if (imm_opcode != -1) {
10475 ins->opcode = imm_opcode;
10476 if (sp [1]->opcode == OP_I8CONST) {
10477 #if SIZEOF_REGISTER == 8
10478 ins->inst_imm = sp [1]->inst_l;
10480 ins->inst_ls_word = sp [1]->inst_ls_word;
10481 ins->inst_ms_word = sp [1]->inst_ms_word;
10485 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10488 /* Might be followed by an instruction added by add_widen_op */
10489 if (sp [1]->next == NULL)
10490 NULLIFY_INS (sp [1]);
10493 MONO_ADD_INS ((cfg)->cbb, (ins));
10495 *sp++ = mono_decompose_opcode (cfg, ins);
10508 case CEE_CONV_OVF_I8:
10509 case CEE_CONV_OVF_U8:
10510 case CEE_CONV_R_UN:
10513 /* Special case this earlier so we have long constants in the IR */
10514 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10515 int data = sp [-1]->inst_c0;
10516 sp [-1]->opcode = OP_I8CONST;
10517 sp [-1]->type = STACK_I8;
10518 #if SIZEOF_REGISTER == 8
10519 if ((*ip) == CEE_CONV_U8)
10520 sp [-1]->inst_c0 = (guint32)data;
10522 sp [-1]->inst_c0 = data;
10524 sp [-1]->inst_ls_word = data;
10525 if ((*ip) == CEE_CONV_U8)
10526 sp [-1]->inst_ms_word = 0;
10528 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10530 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10537 case CEE_CONV_OVF_I4:
10538 case CEE_CONV_OVF_I1:
10539 case CEE_CONV_OVF_I2:
10540 case CEE_CONV_OVF_I:
10541 case CEE_CONV_OVF_U:
10544 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10545 ADD_UNOP (CEE_CONV_OVF_I8);
10552 case CEE_CONV_OVF_U1:
10553 case CEE_CONV_OVF_U2:
10554 case CEE_CONV_OVF_U4:
10557 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10558 ADD_UNOP (CEE_CONV_OVF_U8);
10565 case CEE_CONV_OVF_I1_UN:
10566 case CEE_CONV_OVF_I2_UN:
10567 case CEE_CONV_OVF_I4_UN:
10568 case CEE_CONV_OVF_I8_UN:
10569 case CEE_CONV_OVF_U1_UN:
10570 case CEE_CONV_OVF_U2_UN:
10571 case CEE_CONV_OVF_U4_UN:
10572 case CEE_CONV_OVF_U8_UN:
10573 case CEE_CONV_OVF_I_UN:
10574 case CEE_CONV_OVF_U_UN:
10581 CHECK_CFG_EXCEPTION;
10585 case CEE_ADD_OVF_UN:
10587 case CEE_MUL_OVF_UN:
10589 case CEE_SUB_OVF_UN:
10595 GSHAREDVT_FAILURE (*ip);
10598 token = read32 (ip + 1);
10599 klass = mini_get_class (method, token, generic_context);
10600 CHECK_TYPELOAD (klass);
10602 if (generic_class_is_reference_type (cfg, klass)) {
10603 MonoInst *store, *load;
10604 int dreg = alloc_ireg_ref (cfg);
10606 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10607 load->flags |= ins_flag;
10608 MONO_ADD_INS (cfg->cbb, load);
10610 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10611 store->flags |= ins_flag;
10612 MONO_ADD_INS (cfg->cbb, store);
10614 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10615 emit_write_barrier (cfg, sp [0], sp [1]);
10617 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10623 int loc_index = -1;
10629 token = read32 (ip + 1);
10630 klass = mini_get_class (method, token, generic_context);
10631 CHECK_TYPELOAD (klass);
10633 /* Optimize the common ldobj+stloc combination */
10636 loc_index = ip [6];
10643 loc_index = ip [5] - CEE_STLOC_0;
10650 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10651 CHECK_LOCAL (loc_index);
10653 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10654 ins->dreg = cfg->locals [loc_index]->dreg;
10655 ins->flags |= ins_flag;
10658 if (ins_flag & MONO_INST_VOLATILE) {
10659 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10660 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10666 /* Optimize the ldobj+stobj combination */
10667 /* The reference case ends up being a load+store anyway */
10668 /* Skip this if the operation is volatile. */
10669 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10674 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10682 ins->flags |= ins_flag;
10685 if (ins_flag & MONO_INST_VOLATILE) {
10686 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10687 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10696 CHECK_STACK_OVF (1);
10698 n = read32 (ip + 1);
10700 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10701 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10702 ins->type = STACK_OBJ;
10705 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10706 MonoInst *iargs [1];
10707 char *str = (char *)mono_method_get_wrapper_data (method, n);
10709 if (cfg->compile_aot)
10710 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10712 EMIT_NEW_PCONST (cfg, iargs [0], str);
10713 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10715 if (cfg->opt & MONO_OPT_SHARED) {
10716 MonoInst *iargs [3];
10718 if (cfg->compile_aot) {
10719 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10721 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10722 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10723 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10724 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10725 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10728 if (cfg->cbb->out_of_line) {
10729 MonoInst *iargs [2];
10731 if (image == mono_defaults.corlib) {
10733 * Avoid relocations in AOT and save some space by using a
10734 * version of helper_ldstr specialized to mscorlib.
10736 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10737 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10739 /* Avoid creating the string object */
10740 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10741 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10742 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10746 if (cfg->compile_aot) {
10747 NEW_LDSTRCONST (cfg, ins, image, n);
10749 MONO_ADD_INS (cfg->cbb, ins);
10752 NEW_PCONST (cfg, ins, NULL);
10753 ins->type = STACK_OBJ;
10754 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10758 OUT_OF_MEMORY_FAILURE;
10761 MONO_ADD_INS (cfg->cbb, ins);
10770 MonoInst *iargs [2];
10771 MonoMethodSignature *fsig;
10774 MonoInst *vtable_arg = NULL;
10777 token = read32 (ip + 1);
10778 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10781 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10784 mono_save_token_info (cfg, image, token, cmethod);
10786 if (!mono_class_init (cmethod->klass))
10787 TYPE_LOAD_ERROR (cmethod->klass);
10789 context_used = mini_method_check_context_used (cfg, cmethod);
10791 if (mono_security_core_clr_enabled ())
10792 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10794 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10795 emit_class_init (cfg, cmethod->klass);
10796 CHECK_TYPELOAD (cmethod->klass);
10800 if (cfg->gsharedvt) {
10801 if (mini_is_gsharedvt_variable_signature (sig))
10802 GSHAREDVT_FAILURE (*ip);
10806 n = fsig->param_count;
10810 * Generate smaller code for the common newobj <exception> instruction in
10811 * argument checking code.
10813 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10814 is_exception_class (cmethod->klass) && n <= 2 &&
10815 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10816 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10817 MonoInst *iargs [3];
10821 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10824 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10827 iargs [1] = sp [0];
10828 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10831 iargs [1] = sp [0];
10832 iargs [2] = sp [1];
10833 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10836 g_assert_not_reached ();
10844 /* move the args to allow room for 'this' in the first position */
10850 /* check_call_signature () requires sp[0] to be set */
10851 this_ins.type = STACK_OBJ;
10852 sp [0] = &this_ins;
10853 if (check_call_signature (cfg, fsig, sp))
10858 if (mini_class_is_system_array (cmethod->klass)) {
10859 *sp = emit_get_rgctx_method (cfg, context_used,
10860 cmethod, MONO_RGCTX_INFO_METHOD);
10862 /* Avoid varargs in the common case */
10863 if (fsig->param_count == 1)
10864 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10865 else if (fsig->param_count == 2)
10866 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10867 else if (fsig->param_count == 3)
10868 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10869 else if (fsig->param_count == 4)
10870 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10872 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10873 } else if (cmethod->string_ctor) {
10874 g_assert (!context_used);
10875 g_assert (!vtable_arg);
10876 /* we simply pass a null pointer */
10877 EMIT_NEW_PCONST (cfg, *sp, NULL);
10878 /* now call the string ctor */
10879 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10881 if (cmethod->klass->valuetype) {
10882 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10883 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10884 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10889 * The code generated by mini_emit_virtual_call () expects
10890 * iargs [0] to be a boxed instance, but luckily the vcall
10891 * will be transformed into a normal call there.
10893 } else if (context_used) {
10894 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10897 MonoVTable *vtable = NULL;
10899 if (!cfg->compile_aot)
10900 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10901 CHECK_TYPELOAD (cmethod->klass);
10904 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10905 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10906 * As a workaround, we call class cctors before allocating objects.
10908 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10909 emit_class_init (cfg, cmethod->klass);
10910 if (cfg->verbose_level > 2)
10911 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10912 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10915 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10918 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10921 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10923 /* Now call the actual ctor */
10924 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10925 CHECK_CFG_EXCEPTION;
10928 if (alloc == NULL) {
10930 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10931 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10939 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10940 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10943 case CEE_CASTCLASS:
10948 token = read32 (ip + 1);
10949 klass = mini_get_class (method, token, generic_context);
10950 CHECK_TYPELOAD (klass);
10951 if (sp [0]->type != STACK_OBJ)
10954 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10955 ins->dreg = alloc_preg (cfg);
10956 ins->sreg1 = (*sp)->dreg;
10957 ins->klass = klass;
10958 ins->type = STACK_OBJ;
10959 MONO_ADD_INS (cfg->cbb, ins);
10961 CHECK_CFG_EXCEPTION;
10966 case CEE_UNBOX_ANY: {
10967 MonoInst *res, *addr;
10972 token = read32 (ip + 1);
10973 klass = mini_get_class (method, token, generic_context);
10974 CHECK_TYPELOAD (klass);
10976 mono_save_token_info (cfg, image, token, klass);
10978 context_used = mini_class_check_context_used (cfg, klass);
10980 if (mini_is_gsharedvt_klass (klass)) {
10981 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10983 } else if (generic_class_is_reference_type (cfg, klass)) {
10984 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10985 res->dreg = alloc_preg (cfg);
10986 res->sreg1 = (*sp)->dreg;
10987 res->klass = klass;
10988 res->type = STACK_OBJ;
10989 MONO_ADD_INS (cfg->cbb, res);
10990 } else if (mono_class_is_nullable (klass)) {
10991 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10993 addr = handle_unbox (cfg, klass, sp, context_used);
10995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
/*
 * CEE_BOX region (case label elided in this view).  Besides the plain
 * handle_box () path, two peephole optimizations are applied:
 *   1) box + constrained. + callvirt Enum::HasFlag  -> intrinsic sequence
 *   2) box (non-nullable struct) + brtrue/brfalse   -> unconditional branch
 *      (a boxed value type is never null)
 */
11006 MonoClass *enum_class;
11007 MonoMethod *has_flag;
11013 token = read32 (ip + 1);
11014 klass = mini_get_class (method, token, generic_context);
11015 CHECK_TYPELOAD (klass);
11017 mono_save_token_info (cfg, image, token, klass);
11019 context_used = mini_class_check_context_used (cfg, klass);
11021 if (generic_class_is_reference_type (cfg, klass)) {
11027 if (klass == mono_defaults.void_class)
11029 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11031 /* frequent check in generic code: box (struct), brtrue */
11036 * <push int/long ptr>
11039 * constrained. MyFlags
11040 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11042 * If we find this sequence and the operand types on box and constrained
11043 * are equal, we can emit a specialized instruction sequence instead of
11044 * the very slow HasFlag () call.
/* IL layout assumed below: box (5 bytes) + constrained. (6) + callvirt (5) */
11046 if ((cfg->opt & MONO_OPT_INTRINS) &&
11047 /* Cheap checks first. */
11048 ip + 5 + 6 + 5 < end &&
11049 ip [5] == CEE_PREFIX1 &&
11050 ip [6] == CEE_CONSTRAINED_ &&
11051 ip [11] == CEE_CALLVIRT &&
11052 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11053 mono_class_is_enum (klass) &&
11054 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11055 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11056 has_flag->klass == mono_defaults.enum_class &&
11057 !strcmp (has_flag->name, "HasFlag") &&
11058 has_flag->signature->hasthis &&
11059 has_flag->signature->param_count == 1) {
11060 CHECK_TYPELOAD (enum_class);
/* only fire when box'd type and constrained type agree */
11062 if (enum_class == klass) {
11063 MonoInst *enum_this, *enum_flag;
11068 enum_this = sp [0];
11069 enum_flag = sp [1];
11071 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11076 // FIXME: LLVM can't handle the inconsistent bb linking
11077 if (!mono_class_is_nullable (klass) &&
11078 !mini_is_gsharedvt_klass (klass) &&
11079 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11080 (ip [5] == CEE_BRTRUE ||
11081 ip [5] == CEE_BRTRUE_S ||
11082 ip [5] == CEE_BRFALSE ||
11083 ip [5] == CEE_BRFALSE_S)) {
11084 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11086 MonoBasicBlock *true_bb, *false_bb;
11090 if (cfg->verbose_level > 3) {
11091 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11092 printf ("<box+brtrue opt>\n");
/* decode the branch target (short form: 1-byte offset; long form: 4-byte) */
11097 case CEE_BRFALSE_S:
11100 target = ip + 1 + (signed char)(*ip);
11107 target = ip + 4 + (gint)(read32 (ip));
11111 g_assert_not_reached ();
11115 * We need to link both bblocks, since it is needed for handling stack
11116 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11117 * Branching to only one of them would lead to inconsistencies, so
11118 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11120 GET_BBLOCK (cfg, true_bb, target);
11121 GET_BBLOCK (cfg, false_bb, ip);
11123 mono_link_bblock (cfg, cfg->cbb, true_bb);
11124 mono_link_bblock (cfg, cfg->cbb, false_bb);
/* spill the evaluation stack before leaving the basic block */
11126 if (sp != stack_start) {
11127 handle_stack_args (cfg, stack_start, sp - stack_start);
11129 CHECK_UNVERIFIABLE (cfg);
11132 if (COMPILE_LLVM (cfg)) {
11133 dreg = alloc_ireg (cfg);
11134 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11137 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11139 /* The JIT can't eliminate the iconst+compare */
11140 MONO_INST_NEW (cfg, ins, OP_BR);
11141 ins->inst_target_bb = is_true ? true_bb : false_bb;
11142 MONO_ADD_INS (cfg->cbb, ins);
11145 start_new_bblock = 1;
/* default path: emit the real box operation */
11149 *sp++ = handle_box (cfg, val, klass, context_used);
11151 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX (case label elided): pushes a managed pointer to the value
 * inside the boxed object.  Nullable<T> needs handle_unbox_nullable (),
 * which returns the value itself, so its address is taken afterwards.
 */
11160 token = read32 (ip + 1);
11161 klass = mini_get_class (method, token, generic_context);
11162 CHECK_TYPELOAD (klass);
11164 mono_save_token_info (cfg, image, token, klass);
11166 context_used = mini_class_check_context_used (cfg, klass);
11168 if (mono_class_is_nullable (klass)) {
11171 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11172 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11176 ins = handle_unbox (cfg, klass, sp, context_used);
/*
 * Field access opcodes (LDFLD/LDFLDA/STFLD/LDSFLD/LDSFLDA/STSFLD),
 * shared setup followed by the instance-field case.
 * NOTE(review): this view is elided; in particular the LDFLD-on-valuetype
 * and LDSFLD conversion branches are only partially visible.
 */
11189 MonoClassField *field;
11190 #ifndef DISABLE_REMOTING
11194 gboolean is_instance;
11196 gpointer addr = NULL;
11197 gboolean is_special_static;
11199 MonoInst *store_val = NULL;
11200 MonoInst *thread_ins;
11203 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11205 if (op == CEE_STFLD) {
11208 store_val = sp [1];
11213 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11215 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11218 if (op == CEE_STSFLD) {
11221 store_val = sp [0];
11226 token = read32 (ip + 1);
/* wrappers carry the resolved field directly in their wrapper data */
11227 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11228 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11229 klass = field->parent;
11232 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11235 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11236 FIELD_ACCESS_FAILURE (method, field);
11237 mono_class_init (klass);
11239 /* if the class is Critical then transparent code cannot access its fields */
11240 if (!is_instance && mono_security_core_clr_enabled ())
11241 ensure_method_is_allowed_to_access_field (cfg, method, field);
11243 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11244 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11245 if (mono_security_core_clr_enabled ())
11246 ensure_method_is_allowed_to_access_field (cfg, method, field);
11249 ftype = mono_field_get_type (field);
11252 * LDFLD etc. is usable on static fields as well, so convert those cases to
11255 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11267 g_assert_not_reached ();
11269 is_instance = FALSE;
11272 context_used = mini_class_check_context_used (cfg, klass);
11274 /* INSTANCE CASE */
/* for value types the field offset is relative to the unboxed data */
11276 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11277 if (op == CEE_STFLD) {
11278 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11280 #ifndef DISABLE_REMOTING
/* transparent-proxy targets must go through the remoting stfld wrapper */
11281 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11282 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11283 MonoInst *iargs [5];
11285 GSHAREDVT_FAILURE (op);
11287 iargs [0] = sp [0];
11288 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11289 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11290 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11292 iargs [4] = sp [1];
11294 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11295 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11296 iargs, ip, cfg->real_offset, TRUE);
11297 CHECK_CFG_EXCEPTION;
11298 g_assert (costs > 0);
11300 cfg->real_offset += 5;
11302 inline_costs += costs;
11304 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11309 MonoInst *store, *wbarrier_ptr_ins = NULL;
11311 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* gsharedvt: field offset is only known at runtime, fetch it from the rgctx */
11313 if (mini_is_gsharedvt_klass (klass)) {
11314 MonoInst *offset_ins;
11316 context_used = mini_class_check_context_used (cfg, klass);
11318 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11319 /* The value is offset by 1 */
11320 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11321 dreg = alloc_ireg_mp (cfg);
11322 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11323 wbarrier_ptr_ins = ins;
11324 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11325 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11327 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11329 if (sp [0]->opcode != OP_LDADDR)
11330 store->flags |= MONO_INST_FAULT;
/* GC write barrier needed for ref stores of non-NULL values */
11332 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11333 if (mini_is_gsharedvt_klass (klass)) {
11334 g_assert (wbarrier_ptr_ins);
11335 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11337 /* insert call to write barrier */
11341 dreg = alloc_ireg_mp (cfg);
11342 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11343 emit_write_barrier (cfg, ptr, sp [1]);
11347 store->flags |= ins_flag;
11354 #ifndef DISABLE_REMOTING
/* remoting ldfld/ldflda wrapper for transparent-proxy targets */
11355 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11356 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11357 MonoInst *iargs [4];
11359 GSHAREDVT_FAILURE (op);
11361 iargs [0] = sp [0];
11362 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11363 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11364 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11365 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11366 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11367 iargs, ip, cfg->real_offset, TRUE);
11368 CHECK_CFG_EXCEPTION;
11369 g_assert (costs > 0);
11371 cfg->real_offset += 5;
11375 inline_costs += costs;
11377 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* ldfld on a value type on the stack: materialize it so its address exists */
11383 if (sp [0]->type == STACK_VTYPE) {
11386 /* Have to compute the address of the variable */
11388 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11390 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11392 g_assert (var->klass == klass);
11394 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11398 if (op == CEE_LDFLDA) {
/* explicit null check: ldflda must throw before the address escapes */
11399 if (sp [0]->type == STACK_OBJ) {
11400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11401 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11404 dreg = alloc_ireg_mp (cfg);
11406 if (mini_is_gsharedvt_klass (klass)) {
11407 MonoInst *offset_ins;
11409 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11410 /* The value is offset by 1 */
11411 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11412 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11414 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11416 ins->klass = mono_class_from_mono_type (field->type);
11417 ins->type = STACK_MP;
/* plain LDFLD load */
11422 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11424 if (mini_is_gsharedvt_klass (klass)) {
11425 MonoInst *offset_ins;
11427 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11428 /* The value is offset by 1 */
11429 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11430 dreg = alloc_ireg_mp (cfg);
11431 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11432 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11434 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11436 load->flags |= ins_flag;
11437 if (sp [0]->opcode != OP_LDADDR)
11438 load->flags |= MONO_INST_FAULT;
/*
 * STATIC FIELD CASE (LDSFLD/LDSFLDA/STSFLD).  Computes the field address
 * via one of several strategies (thread-static TLS fast path, shared-code
 * icall, rgctx lookup, or a compile-time constant), then emits the actual
 * load/store with the memory-ordering fences required by ECMA-335 12.6.7.
 */
11450 context_used = mini_class_check_context_used (cfg, klass);
/* literal (const) fields have no storage: loading them this way is an error */
11452 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11453 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11457 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11458 * to be called here.
11460 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11461 mono_class_vtable (cfg->domain, klass);
11462 CHECK_TYPELOAD (klass);
11464 mono_domain_lock (cfg->domain);
11465 if (cfg->domain->special_static_fields)
11466 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11467 mono_domain_unlock (cfg->domain);
11469 is_special_static = mono_class_field_is_special_static (field);
/* high bit of addr set seems to mark addresses not eligible for the TLS
 * fast path -- NOTE(review): elided context, confirm against full source */
11471 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11472 thread_ins = mono_get_thread_intrinsic (cfg);
11476 /* Generate IR to compute the field address */
11477 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11479 * Fast access to TLS data
11480 * Inline version of get_thread_static_data () in
11484 int idx, static_data_reg, array_reg, dreg;
11486 GSHAREDVT_FAILURE (op);
11488 MONO_ADD_INS (cfg->cbb, thread_ins);
11489 static_data_reg = alloc_ireg (cfg);
11490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11492 if (cfg->compile_aot) {
11493 int offset_reg, offset2_reg, idx_reg;
11495 /* For TLS variables, this will return the TLS offset */
11496 EMIT_NEW_SFLDACONST (cfg, ins, field);
/* decode the packed offset: low 6 bits = chunk index, rest = byte offset */
11497 offset_reg = ins->dreg;
11498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11499 idx_reg = alloc_ireg (cfg);
11500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11502 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11503 array_reg = alloc_ireg (cfg);
11504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11505 offset2_reg = alloc_ireg (cfg);
11506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11508 dreg = alloc_ireg (cfg);
11509 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT: the packed offset is a compile-time constant, fold the decode */
11511 offset = (gsize)addr & 0x7fffffff;
11512 idx = offset & 0x3f;
11514 array_reg = alloc_ireg (cfg);
11515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11516 dreg = alloc_ireg (cfg);
11517 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
/* shared or AOT special-static: resolve the address through an icall */
11519 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11520 (cfg->compile_aot && is_special_static) ||
11521 (context_used && is_special_static)) {
11522 MonoInst *iargs [2];
11524 g_assert (field->parent);
11525 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11526 if (context_used) {
11527 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11528 field, MONO_RGCTX_INFO_CLASS_FIELD);
11530 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11532 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* generic-shared code: base address comes from the runtime generic context */
11533 } else if (context_used) {
11534 MonoInst *static_data;
11537 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11538 method->klass->name_space, method->klass->name, method->name,
11539 depth, field->offset);
11542 if (mono_class_needs_cctor_run (klass, method))
11543 emit_class_init (cfg, klass);
11546 * The pointer we're computing here is
11548 * super_info.static_data + field->offset
11550 static_data = emit_get_rgctx_klass (cfg, context_used,
11551 klass, MONO_RGCTX_INFO_STATIC_DATA);
11553 if (mini_is_gsharedvt_klass (klass)) {
11554 MonoInst *offset_ins;
11556 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11557 /* The value is offset by 1 */
11558 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11559 dreg = alloc_ireg_mp (cfg);
11560 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11561 } else if (field->offset == 0) {
11564 int addr_reg = mono_alloc_preg (cfg);
11565 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11567 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11568 MonoInst *iargs [2];
11570 g_assert (field->parent);
11571 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11572 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11573 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* direct (non-shared) case: vtable is available at compile time */
11575 MonoVTable *vtable = NULL;
11577 if (!cfg->compile_aot)
11578 vtable = mono_class_vtable (cfg->domain, klass);
11579 CHECK_TYPELOAD (klass);
11582 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
/* only emit one class-init call per class per method */
11583 if (!(g_slist_find (class_inits, klass))) {
11584 emit_class_init (cfg, klass);
11585 if (cfg->verbose_level > 2)
11586 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11587 class_inits = g_slist_prepend (class_inits, klass);
11590 if (cfg->run_cctors) {
11591 /* This makes so that inline cannot trigger */
11592 /* .cctors: too many apps depend on them */
11593 /* running with a specific order... */
11595 if (! vtable->initialized)
11596 INLINE_FAILURE ("class init");
11597 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11598 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11599 goto exception_exit;
11603 if (cfg->compile_aot)
11604 EMIT_NEW_SFLDACONST (cfg, ins, field);
11607 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11609 EMIT_NEW_PCONST (cfg, ins, addr);
/* special-static slow path: resolve the cookie through an icall */
11612 MonoInst *iargs [1];
11613 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11614 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11618 /* Generate IR to do the actual load/store operation */
11620 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11621 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11622 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11625 if (op == CEE_LDSFLDA) {
11626 ins->klass = mono_class_from_mono_type (ftype);
11627 ins->type = STACK_PTR;
11629 } else if (op == CEE_STSFLD) {
11632 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11633 store->flags |= ins_flag;
/* LDSFLD: if the field is initonly and the class is initialized, try to
 * fold the load into a compile-time constant */
11635 gboolean is_const = FALSE;
11636 MonoVTable *vtable = NULL;
11637 gpointer addr = NULL;
11639 if (!context_used) {
11640 vtable = mono_class_vtable (cfg->domain, klass);
11641 CHECK_TYPELOAD (klass);
11643 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11644 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11645 int ro_type = ftype->type;
11647 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
/* fold enum fields as their underlying integral type */
11648 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11649 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11652 GSHAREDVT_FAILURE (op);
11654 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11657 case MONO_TYPE_BOOLEAN:
11659 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11663 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11666 case MONO_TYPE_CHAR:
11668 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11672 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11677 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11681 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11686 case MONO_TYPE_PTR:
11687 case MONO_TYPE_FNPTR:
11688 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11689 type_to_eval_stack_type ((cfg), field->type, *sp);
11692 case MONO_TYPE_STRING:
11693 case MONO_TYPE_OBJECT:
11694 case MONO_TYPE_CLASS:
11695 case MONO_TYPE_SZARRAY:
11696 case MONO_TYPE_ARRAY:
/* can't embed raw object pointers when the GC moves objects */
11697 if (!mono_gc_is_moving ()) {
11698 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11699 type_to_eval_stack_type ((cfg), field->type, *sp);
11707 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11712 case MONO_TYPE_VALUETYPE:
11722 CHECK_STACK_OVF (1);
11724 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11725 load->flags |= ins_flag;
11731 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11732 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11733 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/*
 * CEE_STOBJ (case label elided): store a value of type 'klass' through
 * the address at sp [0].  Emits a release fence for volatile stores and
 * a GC write barrier when the stored type contains references.
 */
11744 token = read32 (ip + 1);
11745 klass = mini_get_class (method, token, generic_context);
11746 CHECK_TYPELOAD (klass);
11747 if (ins_flag & MONO_INST_VOLATILE) {
11748 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11749 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11751 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11752 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11753 ins->flags |= ins_flag;
/* the write barrier wrapper itself must not recurse into a barrier */
11754 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11755 generic_class_is_reference_type (cfg, klass)) {
11756 /* insert call to write barrier */
11757 emit_write_barrier (cfg, sp [0], sp [1]);
/*
 * CEE_NEWARR (case label elided): allocate a 1-dimensional array of
 * 'klass' with the length at sp [0].  Also recognizes the
 * newarr + ldtoken + InitializeArray pattern and open-codes it as a
 * memcpy from the image's RVA data blob.
 */
11769 const char *data_ptr;
11771 guint32 field_token;
11777 token = read32 (ip + 1);
11779 klass = mini_get_class (method, token, generic_context);
11780 CHECK_TYPELOAD (klass);
11782 context_used = mini_class_check_context_used (cfg, klass);
/* 64-bit lengths must be overflow-checked down to a 32-bit count */
11784 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11785 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11786 ins->sreg1 = sp [0]->dreg;
11787 ins->type = STACK_I4;
11788 ins->dreg = alloc_ireg (cfg);
11789 MONO_ADD_INS (cfg->cbb, ins);
11790 *sp = mono_decompose_opcode (cfg, ins);
/* generic-shared code: vtable comes from the runtime generic context */
11793 if (context_used) {
11794 MonoInst *args [3];
11795 MonoClass *array_class = mono_array_class_get (klass, 1);
11796 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11798 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11801 args [0] = emit_get_rgctx_klass (cfg, context_used,
11802 array_class, MONO_RGCTX_INFO_VTABLE);
11807 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11809 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11811 if (cfg->opt & MONO_OPT_SHARED) {
11812 /* Decompose now to avoid problems with references to the domainvar */
11813 MonoInst *iargs [3];
11815 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11816 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11817 iargs [2] = sp [0];
11819 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11821 /* Decompose later since it is needed by abcrem */
11822 MonoClass *array_type = mono_array_class_get (klass, 1);
11823 mono_class_vtable (cfg->domain, array_type);
11824 CHECK_TYPELOAD (array_type);
11826 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11827 ins->dreg = alloc_ireg_ref (cfg);
11828 ins->sreg1 = sp [0]->dreg;
11829 ins->inst_newa_class = klass;
11830 ins->type = STACK_OBJ;
11831 ins->klass = array_type;
11832 MONO_ADD_INS (cfg->cbb, ins);
11833 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11834 cfg->cbb->has_array_access = TRUE;
11836 /* Needed so mono_emit_load_get_addr () gets called */
11837 mono_get_got_var (cfg);
11847 * we inline/optimize the initialization sequence if possible.
11848 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11849 * for small sizes open code the memcpy
11850 * ensure the rva field is big enough
/* InitializeArray intrinsic: constant-length array + recognizable RVA blob */
11852 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11853 MonoMethod *memcpy_method = get_memcpy_method ();
11854 MonoInst *iargs [3];
11855 int add_reg = alloc_ireg_mp (cfg);
11857 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
/* AOT: data address is only known at load time, emit an RVA patch */
11858 if (cfg->compile_aot) {
11859 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11861 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11863 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11864 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN (case label elided): push the array length as I4.
 * MONO_INST_FAULT makes the implicit null check throw on a null array. */
11873 if (sp [0]->type != STACK_OBJ)
11876 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11877 ins->dreg = alloc_preg (cfg);
11878 ins->sreg1 = sp [0]->dreg;
11879 ins->type = STACK_I4;
11880 /* This flag will be inherited by the decomposition */
11881 ins->flags |= MONO_INST_FAULT;
11882 MONO_ADD_INS (cfg->cbb, ins);
11883 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11884 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA (case label elided): push the address of an array element. */
11892 if (sp [0]->type != STACK_OBJ)
11895 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11897 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11898 CHECK_TYPELOAD (klass);
11899 /* we need to make sure that this array is exactly the type it needs
11900 * to be for correctness. the wrappers are lax with their usage
11901 * so we need to ignore them here
/* exact-type check required for reference-element arrays: a covariant
 * array would otherwise let a stelem through the returned address violate
 * type safety (skipped for the 'readonly.' prefix per ECMA-335) */
11903 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11904 MonoClass *array_class = mono_array_class_get (klass, 1);
11905 mini_emit_check_array_type (cfg, sp [0], array_class);
11906 CHECK_TYPELOAD (array_class);
11910 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/*
 * CEE_LDELEM and its typed shorthands: load an array element.
 * Three strategies: gsharedvt (address + typed load), constant index
 * (fold the offset at compile time), or the generic ldelema + load.
 */
11915 case CEE_LDELEM_I1:
11916 case CEE_LDELEM_U1:
11917 case CEE_LDELEM_I2:
11918 case CEE_LDELEM_U2:
11919 case CEE_LDELEM_I4:
11920 case CEE_LDELEM_U4:
11921 case CEE_LDELEM_I8:
11923 case CEE_LDELEM_R4:
11924 case CEE_LDELEM_R8:
11925 case CEE_LDELEM_REF: {
/* generic CEE_LDELEM carries an explicit type token */
11931 if (*ip == CEE_LDELEM) {
11933 token = read32 (ip + 1);
11934 klass = mini_get_class (method, token, generic_context);
11935 CHECK_TYPELOAD (klass);
11936 mono_class_init (klass);
/* shorthand opcodes imply the element type */
11939 klass = array_access_to_klass (*ip);
11941 if (sp [0]->type != STACK_OBJ)
11944 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11946 if (mini_is_gsharedvt_variable_klass (klass)) {
11947 // FIXME-VT: OP_ICONST optimization
11948 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11949 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11950 ins->opcode = OP_LOADV_MEMBASE;
11951 } else if (sp [1]->opcode == OP_ICONST) {
11952 int array_reg = sp [0]->dreg;
11953 int index_reg = sp [1]->dreg;
11954 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64-bit needs the index zero-extended before addressing */
11956 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11957 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11959 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11960 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11962 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11963 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11966 if (*ip == CEE_LDELEM)
/* CEE_STELEM and its typed shorthands: store an array element
 * (bounds/type checks are handled inside emit_array_store ()). */
11973 case CEE_STELEM_I1:
11974 case CEE_STELEM_I2:
11975 case CEE_STELEM_I4:
11976 case CEE_STELEM_I8:
11977 case CEE_STELEM_R4:
11978 case CEE_STELEM_R8:
11979 case CEE_STELEM_REF:
11984 cfg->flags |= MONO_CFG_HAS_LDELEMA;
/* generic CEE_STELEM carries an explicit type token */
11986 if (*ip == CEE_STELEM) {
11988 token = read32 (ip + 1);
11989 klass = mini_get_class (method, token, generic_context);
11990 CHECK_TYPELOAD (klass);
11991 mono_class_init (klass);
11994 klass = array_access_to_klass (*ip);
11996 if (sp [0]->type != STACK_OBJ)
11999 emit_array_store (cfg, klass, sp, TRUE);
12001 if (*ip == CEE_STELEM)
/* CEE_CKFINITE: throw ArithmeticException if the R8 on the stack is
 * NaN or infinite; llvm-only mode routes this through an icall. */
12008 case CEE_CKFINITE: {
12012 if (cfg->llvm_only) {
12013 MonoInst *iargs [1];
12015 iargs [0] = sp [0];
12016 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12018 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12019 ins->sreg1 = sp [0]->dreg;
12020 ins->dreg = alloc_freg (cfg);
12021 ins->type = STACK_R8;
12022 MONO_ADD_INS (cfg->cbb, ins);
12024 *sp++ = mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the address from a TypedReference after
 * checking that its stored klass matches the expected type; throws
 * InvalidCastException on mismatch.
 */
12030 case CEE_REFANYVAL: {
12031 MonoInst *src_var, *src;
12033 int klass_reg = alloc_preg (cfg);
12034 int dreg = alloc_preg (cfg);
12036 GSHAREDVT_FAILURE (*ip);
12039 MONO_INST_NEW (cfg, ins, *ip);
12042 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12043 CHECK_TYPELOAD (klass);
12045 context_used = mini_class_check_context_used (cfg, klass);
/* the TypedRef is a vtype on the stack: take its address to read fields */
12048 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12050 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12051 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
/* shared code: compare against the rgctx-provided klass at runtime */
12054 if (context_used) {
12055 MonoInst *klass_ins;
12057 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12058 klass, MONO_RGCTX_INFO_KLASS);
12061 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12062 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12064 mini_emit_class_check (cfg, klass_reg, klass);
12066 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12067 ins->type = STACK_MP;
12068 ins->klass = klass;
/*
 * CEE_MKREFANY: build a TypedReference {klass, type, value} in a local
 * and push it as a vtype.  The klass/type slots are filled from the
 * rgctx (shared code), AOT class constants, or immediates (plain JIT).
 */
12073 case CEE_MKREFANY: {
12074 MonoInst *loc, *addr;
12076 GSHAREDVT_FAILURE (*ip);
12079 MONO_INST_NEW (cfg, ins, *ip);
12082 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12083 CHECK_TYPELOAD (klass);
12085 context_used = mini_class_check_context_used (cfg, klass);
12087 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12088 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12090 if (context_used) {
12091 MonoInst *const_ins;
12092 int type_reg = alloc_preg (cfg);
12094 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12095 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
/* &klass->byval_arg computed as klass + offsetof, since only klass is in a reg */
12096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12097 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12098 } else if (cfg->compile_aot) {
12099 int const_reg = alloc_preg (cfg);
12100 int type_reg = alloc_preg (cfg);
12102 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12103 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12105 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
/* plain JIT: both pointers are known constants */
12107 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12108 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12112 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12113 ins->type = STACK_VTYPE;
12114 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a Runtime{Type,Method,Field}Handle for the token.
 * Recognizes the ldtoken + Type::GetTypeFromHandle pattern and folds it
 * into a direct reflection-type constant.
 * NOTE(review): this case continues past the end of this view.
 */
12119 case CEE_LDTOKEN: {
12121 MonoClass *handle_class;
12123 CHECK_STACK_OVF (1);
12126 n = read32 (ip + 1);
/* wrappers store the (handle, handle_class) pair in their wrapper data */
12128 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12129 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12130 handle = mono_method_get_wrapper_data (method, n);
12131 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12132 if (handle_class == mono_defaults.typehandle_class)
12133 handle = &((MonoClass*)handle)->byval_arg;
12136 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12141 mono_class_init (handle_class);
/* shared code: work out whether the token depends on the generic context */
12142 if (cfg->gshared) {
12143 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12144 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12145 /* This case handles ldtoken
12146 of an open type, like for
12149 } else if (handle_class == mono_defaults.typehandle_class) {
12150 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12151 } else if (handle_class == mono_defaults.fieldhandle_class)
12152 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12153 else if (handle_class == mono_defaults.methodhandle_class)
12154 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12156 g_assert_not_reached ();
/* MONO_OPT_SHARED: resolve the token at runtime through an icall */
12159 if ((cfg->opt & MONO_OPT_SHARED) &&
12160 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12161 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12162 MonoInst *addr, *vtvar, *iargs [3];
12163 int method_context_used;
12165 method_context_used = mini_method_check_context_used (cfg, method);
12167 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12169 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12170 EMIT_NEW_ICONST (cfg, iargs [1], n);
12171 if (method_context_used) {
12172 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12173 method, MONO_RGCTX_INFO_METHOD);
12174 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12176 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12177 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
/* spill the icall result into the handle-typed local and reload as vtype */
12179 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12181 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12183 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* peephole: ldtoken immediately followed by Type::GetTypeFromHandle */
12185 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12186 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12187 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12188 (cmethod->klass == mono_defaults.systemtype_class) &&
12189 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12190 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12192 mono_class_init (tclass);
12193 if (context_used) {
12194 ins = emit_get_rgctx_klass (cfg, context_used,
12195 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12196 } else if (cfg->compile_aot) {
12197 if (method->wrapper_type) {
12198 mono_error_init (&error); //got to do it since there are multiple conditionals below
12199 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12200 /* Special case for static synchronized wrappers */
12201 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12203 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12204 /* FIXME: n is not a normal token */
12206 EMIT_NEW_PCONST (cfg, ins, NULL);
12209 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12212 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12214 EMIT_NEW_PCONST (cfg, ins, rt);
12216 ins->type = STACK_OBJ;
12217 ins->klass = cmethod->klass;
/* no GetTypeFromHandle follow-up: push the raw handle as a vtype */
12220 MonoInst *addr, *vtvar;
12222 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12224 if (context_used) {
12225 if (handle_class == mono_defaults.typehandle_class) {
12226 ins = emit_get_rgctx_klass (cfg, context_used,
12227 mono_class_from_mono_type ((MonoType *)handle),
12228 MONO_RGCTX_INFO_TYPE);
12229 } else if (handle_class == mono_defaults.methodhandle_class) {
12230 ins = emit_get_rgctx_method (cfg, context_used,
12231 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12232 } else if (handle_class == mono_defaults.fieldhandle_class) {
12233 ins = emit_get_rgctx_field (cfg, context_used,
12234 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12236 g_assert_not_reached ();
12238 } else if (cfg->compile_aot) {
12239 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12241 EMIT_NEW_PCONST (cfg, ins, handle);
12243 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12245 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12255 MONO_INST_NEW (cfg, ins, OP_THROW);
12257 ins->sreg1 = sp [0]->dreg;
12259 cfg->cbb->out_of_line = TRUE;
12260 MONO_ADD_INS (cfg->cbb, ins);
12261 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12262 MONO_ADD_INS (cfg->cbb, ins);
12265 link_bblock (cfg, cfg->cbb, end_bblock);
12266 start_new_bblock = 1;
12267 /* This can complicate code generation for llvm since the return value might not be defined */
12268 if (COMPILE_LLVM (cfg))
12269 INLINE_FAILURE ("throw");
12271 case CEE_ENDFINALLY:
12272 /* mono_save_seq_point_info () depends on this */
12273 if (sp != stack_start)
12274 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12275 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12276 MONO_ADD_INS (cfg->cbb, ins);
12278 start_new_bblock = 1;
12281 * Control will leave the method so empty the stack, otherwise
12282 * the next basic block will start with a nonempty stack.
12284 while (sp != stack_start) {
12289 case CEE_LEAVE_S: {
12292 if (*ip == CEE_LEAVE) {
12294 target = ip + 5 + (gint32)read32(ip + 1);
12297 target = ip + 2 + (signed char)(ip [1]);
12300 /* empty the stack */
12301 while (sp != stack_start) {
12306 * If this leave statement is in a catch block, check for a
12307 * pending exception, and rethrow it if necessary.
12308 * We avoid doing this in runtime invoke wrappers, since those are called
12309 * by native code which expects the wrapper to catch all exceptions.
12311 for (i = 0; i < header->num_clauses; ++i) {
12312 MonoExceptionClause *clause = &header->clauses [i];
12315 * Use <= in the final comparison to handle clauses with multiple
12316 * leave statements, like in bug #78024.
12317 * The ordering of the exception clauses guarantees that we find the
12318 * innermost clause.
12320 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12322 MonoBasicBlock *dont_throw;
12327 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12330 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12332 NEW_BBLOCK (cfg, dont_throw);
12335 * Currently, we always rethrow the abort exception, despite the
12336 * fact that this is not correct. See thread6.cs for an example.
12337 * But propagating the abort exception is more important than
12338 * getting the semantics right.
12340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12342 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12344 MONO_START_BB (cfg, dont_throw);
12349 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12352 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12354 MonoExceptionClause *clause;
12356 for (tmp = handlers; tmp; tmp = tmp->next) {
12357 clause = (MonoExceptionClause *)tmp->data;
12358 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12360 link_bblock (cfg, cfg->cbb, tblock);
12361 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12362 ins->inst_target_bb = tblock;
12363 ins->inst_eh_block = clause;
12364 MONO_ADD_INS (cfg->cbb, ins);
12365 cfg->cbb->has_call_handler = 1;
12366 if (COMPILE_LLVM (cfg)) {
12367 MonoBasicBlock *target_bb;
12370 * Link the finally bblock with the target, since it will
12371 * conceptually branch there.
12373 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12374 GET_BBLOCK (cfg, target_bb, target);
12375 link_bblock (cfg, tblock, target_bb);
12378 g_list_free (handlers);
12381 MONO_INST_NEW (cfg, ins, OP_BR);
12382 MONO_ADD_INS (cfg->cbb, ins);
12383 GET_BBLOCK (cfg, tblock, target);
12384 link_bblock (cfg, cfg->cbb, tblock);
12385 ins->inst_target_bb = tblock;
12387 start_new_bblock = 1;
12389 if (*ip == CEE_LEAVE)
12398 * Mono specific opcodes
12400 case MONO_CUSTOM_PREFIX: {
12402 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12406 case CEE_MONO_ICALL: {
12408 MonoJitICallInfo *info;
12410 token = read32 (ip + 2);
12411 func = mono_method_get_wrapper_data (method, token);
12412 info = mono_find_jit_icall_by_addr (func);
12414 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12417 CHECK_STACK (info->sig->param_count);
12418 sp -= info->sig->param_count;
12420 ins = mono_emit_jit_icall (cfg, info->func, sp);
12421 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12425 inline_costs += 10 * num_calls++;
12429 case CEE_MONO_LDPTR_CARD_TABLE:
12430 case CEE_MONO_LDPTR_NURSERY_START:
12431 case CEE_MONO_LDPTR_NURSERY_BITS:
12432 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12433 CHECK_STACK_OVF (1);
12436 case CEE_MONO_LDPTR_CARD_TABLE:
12437 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12439 case CEE_MONO_LDPTR_NURSERY_START:
12440 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12442 case CEE_MONO_LDPTR_NURSERY_BITS:
12443 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12445 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12446 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12452 inline_costs += 10 * num_calls++;
12455 case CEE_MONO_LDPTR: {
12458 CHECK_STACK_OVF (1);
12460 token = read32 (ip + 2);
12462 ptr = mono_method_get_wrapper_data (method, token);
12463 EMIT_NEW_PCONST (cfg, ins, ptr);
12466 inline_costs += 10 * num_calls++;
12467 /* Can't embed random pointers into AOT code */
12471 case CEE_MONO_JIT_ICALL_ADDR: {
12472 MonoJitICallInfo *callinfo;
12475 CHECK_STACK_OVF (1);
12477 token = read32 (ip + 2);
12479 ptr = mono_method_get_wrapper_data (method, token);
12480 callinfo = mono_find_jit_icall_by_addr (ptr);
12481 g_assert (callinfo);
12482 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12485 inline_costs += 10 * num_calls++;
12488 case CEE_MONO_ICALL_ADDR: {
12489 MonoMethod *cmethod;
12492 CHECK_STACK_OVF (1);
12494 token = read32 (ip + 2);
12496 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12498 if (cfg->compile_aot) {
12499 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12501 ptr = mono_lookup_internal_call (cmethod);
12503 EMIT_NEW_PCONST (cfg, ins, ptr);
12509 case CEE_MONO_VTADDR: {
12510 MonoInst *src_var, *src;
12516 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12517 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12522 case CEE_MONO_NEWOBJ: {
12523 MonoInst *iargs [2];
12525 CHECK_STACK_OVF (1);
12527 token = read32 (ip + 2);
12528 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12529 mono_class_init (klass);
12530 NEW_DOMAINCONST (cfg, iargs [0]);
12531 MONO_ADD_INS (cfg->cbb, iargs [0]);
12532 NEW_CLASSCONST (cfg, iargs [1], klass);
12533 MONO_ADD_INS (cfg->cbb, iargs [1]);
12534 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12536 inline_costs += 10 * num_calls++;
12539 case CEE_MONO_OBJADDR:
12542 MONO_INST_NEW (cfg, ins, OP_MOVE);
12543 ins->dreg = alloc_ireg_mp (cfg);
12544 ins->sreg1 = sp [0]->dreg;
12545 ins->type = STACK_MP;
12546 MONO_ADD_INS (cfg->cbb, ins);
12550 case CEE_MONO_LDNATIVEOBJ:
12552 * Similar to LDOBJ, but instead load the unmanaged
12553 * representation of the vtype to the stack.
12558 token = read32 (ip + 2);
12559 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12560 g_assert (klass->valuetype);
12561 mono_class_init (klass);
12564 MonoInst *src, *dest, *temp;
12567 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12568 temp->backend.is_pinvoke = 1;
12569 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12570 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12572 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12573 dest->type = STACK_VTYPE;
12574 dest->klass = klass;
12580 case CEE_MONO_RETOBJ: {
12582 * Same as RET, but return the native representation of a vtype
12585 g_assert (cfg->ret);
12586 g_assert (mono_method_signature (method)->pinvoke);
12591 token = read32 (ip + 2);
12592 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12594 if (!cfg->vret_addr) {
12595 g_assert (cfg->ret_var_is_local);
12597 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12599 EMIT_NEW_RETLOADA (cfg, ins);
12601 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12603 if (sp != stack_start)
12606 MONO_INST_NEW (cfg, ins, OP_BR);
12607 ins->inst_target_bb = end_bblock;
12608 MONO_ADD_INS (cfg->cbb, ins);
12609 link_bblock (cfg, cfg->cbb, end_bblock);
12610 start_new_bblock = 1;
12614 case CEE_MONO_CISINST:
12615 case CEE_MONO_CCASTCLASS: {
12620 token = read32 (ip + 2);
12621 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12622 if (ip [1] == CEE_MONO_CISINST)
12623 ins = handle_cisinst (cfg, klass, sp [0]);
12625 ins = handle_ccastclass (cfg, klass, sp [0]);
12630 case CEE_MONO_SAVE_LMF:
12631 case CEE_MONO_RESTORE_LMF:
12634 case CEE_MONO_CLASSCONST:
12635 CHECK_STACK_OVF (1);
12637 token = read32 (ip + 2);
12638 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12641 inline_costs += 10 * num_calls++;
12643 case CEE_MONO_NOT_TAKEN:
12644 cfg->cbb->out_of_line = TRUE;
12647 case CEE_MONO_TLS: {
12650 CHECK_STACK_OVF (1);
12652 key = (MonoTlsKey)read32 (ip + 2);
12653 g_assert (key < TLS_KEY_NUM);
12655 ins = mono_create_tls_get (cfg, key);
12657 if (cfg->compile_aot) {
12659 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12660 ins->dreg = alloc_preg (cfg);
12661 ins->type = STACK_PTR;
12663 g_assert_not_reached ();
12666 ins->type = STACK_PTR;
12667 MONO_ADD_INS (cfg->cbb, ins);
12672 case CEE_MONO_DYN_CALL: {
12673 MonoCallInst *call;
12675 /* It would be easier to call a trampoline, but that would put an
12676 * extra frame on the stack, confusing exception handling. So
12677 * implement it inline using an opcode for now.
12680 if (!cfg->dyn_call_var) {
12681 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12682 /* prevent it from being register allocated */
12683 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12686 /* Has to use a call inst since local regalloc expects it */
12687 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12688 ins = (MonoInst*)call;
12690 ins->sreg1 = sp [0]->dreg;
12691 ins->sreg2 = sp [1]->dreg;
12692 MONO_ADD_INS (cfg->cbb, ins);
12694 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12697 inline_costs += 10 * num_calls++;
12701 case CEE_MONO_MEMORY_BARRIER: {
12703 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12707 case CEE_MONO_ATOMIC_STORE_I4: {
12708 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12714 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12715 ins->dreg = sp [0]->dreg;
12716 ins->sreg1 = sp [1]->dreg;
12717 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12718 MONO_ADD_INS (cfg->cbb, ins);
12723 case CEE_MONO_JIT_ATTACH: {
12724 MonoInst *args [16], *domain_ins;
12725 MonoInst *ad_ins, *jit_tls_ins;
12726 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12728 g_assert (!mono_threads_is_coop_enabled ());
12730 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12732 EMIT_NEW_PCONST (cfg, ins, NULL);
12733 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12735 ad_ins = mono_get_domain_intrinsic (cfg);
12736 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12738 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12739 NEW_BBLOCK (cfg, next_bb);
12740 NEW_BBLOCK (cfg, call_bb);
12742 if (cfg->compile_aot) {
12743 /* AOT code is only used in the root domain */
12744 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12746 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12748 MONO_ADD_INS (cfg->cbb, ad_ins);
12749 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12750 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12752 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12756 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12757 MONO_START_BB (cfg, call_bb);
12760 /* AOT code is only used in the root domain */
12761 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12762 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12763 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12766 MONO_START_BB (cfg, next_bb);
12772 case CEE_MONO_JIT_DETACH: {
12773 MonoInst *args [16];
12775 /* Restore the original domain */
12776 dreg = alloc_ireg (cfg);
12777 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12778 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12782 case CEE_MONO_CALLI_EXTRA_ARG: {
12784 MonoMethodSignature *fsig;
12788 * This is the same as CEE_CALLI, but passes an additional argument
12789 * to the called method in llvmonly mode.
12790 * This is only used by delegate invoke wrappers to call the
12791 * actual delegate method.
12793 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12796 token = read32 (ip + 2);
12804 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12807 if (cfg->llvm_only)
12808 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12810 n = fsig->param_count + fsig->hasthis + 1;
12817 if (cfg->llvm_only) {
12819 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12820 * cconv. This is set by mono_init_delegate ().
12822 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12823 MonoInst *callee = addr;
12824 MonoInst *call, *localloc_ins;
12825 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12826 int low_bit_reg = alloc_preg (cfg);
12828 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12829 NEW_BBLOCK (cfg, end_bb);
12831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12832 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12835 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12836 addr = emit_get_rgctx_sig (cfg, context_used,
12837 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12839 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12841 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12842 ins->dreg = alloc_preg (cfg);
12843 ins->inst_imm = 2 * SIZEOF_VOID_P;
12844 MONO_ADD_INS (cfg->cbb, ins);
12845 localloc_ins = ins;
12846 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12850 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12851 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12853 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12854 MONO_START_BB (cfg, is_gsharedvt_bb);
12855 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12856 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12857 ins->dreg = call->dreg;
12859 MONO_START_BB (cfg, end_bb);
12861 /* Caller uses a normal calling conv */
12863 MonoInst *callee = addr;
12864 MonoInst *call, *localloc_ins;
12865 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12866 int low_bit_reg = alloc_preg (cfg);
12868 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12869 NEW_BBLOCK (cfg, end_bb);
12871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12875 /* Normal case: callee uses a normal cconv, no conversion is needed */
12876 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12877 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12878 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12879 MONO_START_BB (cfg, is_gsharedvt_bb);
12880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12881 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12882 MONO_ADD_INS (cfg->cbb, addr);
12884 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12886 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12887 ins->dreg = alloc_preg (cfg);
12888 ins->inst_imm = 2 * SIZEOF_VOID_P;
12889 MONO_ADD_INS (cfg->cbb, ins);
12890 localloc_ins = ins;
12891 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12892 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12895 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12896 ins->dreg = call->dreg;
12897 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12899 MONO_START_BB (cfg, end_bb);
12902 /* Same as CEE_CALLI */
12903 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12905 * We pass the address to the gsharedvt trampoline in the rgctx reg
12907 MonoInst *callee = addr;
12909 addr = emit_get_rgctx_sig (cfg, context_used,
12910 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12911 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12913 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12917 if (!MONO_TYPE_IS_VOID (fsig->ret))
12918 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12920 CHECK_CFG_EXCEPTION;
12924 constrained_class = NULL;
12927 case CEE_MONO_LDDOMAIN:
12928 CHECK_STACK_OVF (1);
12929 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12934 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12940 case CEE_PREFIX1: {
12943 case CEE_ARGLIST: {
12944 /* somewhat similar to LDTOKEN */
12945 MonoInst *addr, *vtvar;
12946 CHECK_STACK_OVF (1);
12947 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12949 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12950 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12952 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12953 ins->type = STACK_VTYPE;
12954 ins->klass = mono_defaults.argumenthandle_class;
12964 MonoInst *cmp, *arg1, *arg2;
12972 * The following transforms:
12973 * CEE_CEQ into OP_CEQ
12974 * CEE_CGT into OP_CGT
12975 * CEE_CGT_UN into OP_CGT_UN
12976 * CEE_CLT into OP_CLT
12977 * CEE_CLT_UN into OP_CLT_UN
12979 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12981 MONO_INST_NEW (cfg, ins, cmp->opcode);
12982 cmp->sreg1 = arg1->dreg;
12983 cmp->sreg2 = arg2->dreg;
12984 type_from_op (cfg, cmp, arg1, arg2);
12986 add_widen_op (cfg, cmp, &arg1, &arg2);
12987 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12988 cmp->opcode = OP_LCOMPARE;
12989 else if (arg1->type == STACK_R4)
12990 cmp->opcode = OP_RCOMPARE;
12991 else if (arg1->type == STACK_R8)
12992 cmp->opcode = OP_FCOMPARE;
12994 cmp->opcode = OP_ICOMPARE;
12995 MONO_ADD_INS (cfg->cbb, cmp);
12996 ins->type = STACK_I4;
12997 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12998 type_from_op (cfg, ins, arg1, arg2);
13000 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13002 * The backends expect the fceq opcodes to do the
13005 ins->sreg1 = cmp->sreg1;
13006 ins->sreg2 = cmp->sreg2;
13009 MONO_ADD_INS (cfg->cbb, ins);
13015 MonoInst *argconst;
13016 MonoMethod *cil_method;
13018 CHECK_STACK_OVF (1);
13020 n = read32 (ip + 2);
13021 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13024 mono_class_init (cmethod->klass);
13026 mono_save_token_info (cfg, image, n, cmethod);
13028 context_used = mini_method_check_context_used (cfg, cmethod);
13030 cil_method = cmethod;
13031 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13032 emit_method_access_failure (cfg, method, cil_method);
13034 if (mono_security_core_clr_enabled ())
13035 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13038 * Optimize the common case of ldftn+delegate creation
13040 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13041 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13042 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13043 MonoInst *target_ins, *handle_ins;
13044 MonoMethod *invoke;
13045 int invoke_context_used;
13047 invoke = mono_get_delegate_invoke (ctor_method->klass);
13048 if (!invoke || !mono_method_signature (invoke))
13051 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13053 target_ins = sp [-1];
13055 if (mono_security_core_clr_enabled ())
13056 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13058 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13059 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13060 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13062 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13066 /* FIXME: SGEN support */
13067 if (invoke_context_used == 0 || cfg->llvm_only) {
13069 if (cfg->verbose_level > 3)
13070 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13071 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13074 CHECK_CFG_EXCEPTION;
13084 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13085 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13089 inline_costs += 10 * num_calls++;
13092 case CEE_LDVIRTFTN: {
13093 MonoInst *args [2];
13097 n = read32 (ip + 2);
13098 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13101 mono_class_init (cmethod->klass);
13103 context_used = mini_method_check_context_used (cfg, cmethod);
13105 if (mono_security_core_clr_enabled ())
13106 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13109 * Optimize the common case of ldvirtftn+delegate creation
13111 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13112 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13113 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13114 MonoInst *target_ins, *handle_ins;
13115 MonoMethod *invoke;
13116 int invoke_context_used;
13117 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13119 invoke = mono_get_delegate_invoke (ctor_method->klass);
13120 if (!invoke || !mono_method_signature (invoke))
13123 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13125 target_ins = sp [-1];
13127 if (mono_security_core_clr_enabled ())
13128 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13130 /* FIXME: SGEN support */
13131 if (invoke_context_used == 0 || cfg->llvm_only) {
13133 if (cfg->verbose_level > 3)
13134 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13135 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13138 CHECK_CFG_EXCEPTION;
13151 args [1] = emit_get_rgctx_method (cfg, context_used,
13152 cmethod, MONO_RGCTX_INFO_METHOD);
13155 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13157 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13160 inline_costs += 10 * num_calls++;
13164 CHECK_STACK_OVF (1);
13166 n = read16 (ip + 2);
13168 EMIT_NEW_ARGLOAD (cfg, ins, n);
13173 CHECK_STACK_OVF (1);
13175 n = read16 (ip + 2);
13177 NEW_ARGLOADA (cfg, ins, n);
13178 MONO_ADD_INS (cfg->cbb, ins);
13186 n = read16 (ip + 2);
13188 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13190 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13194 CHECK_STACK_OVF (1);
13196 n = read16 (ip + 2);
13198 EMIT_NEW_LOCLOAD (cfg, ins, n);
13203 unsigned char *tmp_ip;
13204 CHECK_STACK_OVF (1);
13206 n = read16 (ip + 2);
13209 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13215 EMIT_NEW_LOCLOADA (cfg, ins, n);
13224 n = read16 (ip + 2);
13226 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13228 emit_stloc_ir (cfg, sp, header, n);
13235 if (sp != stack_start)
13237 if (cfg->method != method)
13239 * Inlining this into a loop in a parent could lead to
13240 * stack overflows which is different behavior than the
13241 * non-inlined case, thus disable inlining in this case.
13243 INLINE_FAILURE("localloc");
13245 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13246 ins->dreg = alloc_preg (cfg);
13247 ins->sreg1 = sp [0]->dreg;
13248 ins->type = STACK_PTR;
13249 MONO_ADD_INS (cfg->cbb, ins);
13251 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13253 ins->flags |= MONO_INST_INIT;
13258 case CEE_ENDFILTER: {
13259 MonoExceptionClause *clause, *nearest;
13264 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13266 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13267 ins->sreg1 = (*sp)->dreg;
13268 MONO_ADD_INS (cfg->cbb, ins);
13269 start_new_bblock = 1;
13273 for (cc = 0; cc < header->num_clauses; ++cc) {
13274 clause = &header->clauses [cc];
13275 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13276 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13277 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13280 g_assert (nearest);
13281 if ((ip - header->code) != nearest->handler_offset)
13286 case CEE_UNALIGNED_:
13287 ins_flag |= MONO_INST_UNALIGNED;
13288 /* FIXME: record alignment? we can assume 1 for now */
13292 case CEE_VOLATILE_:
13293 ins_flag |= MONO_INST_VOLATILE;
13297 ins_flag |= MONO_INST_TAILCALL;
13298 cfg->flags |= MONO_CFG_HAS_TAIL;
13299 /* Can't inline tail calls at this time */
13300 inline_costs += 100000;
13307 token = read32 (ip + 2);
13308 klass = mini_get_class (method, token, generic_context);
13309 CHECK_TYPELOAD (klass);
13310 if (generic_class_is_reference_type (cfg, klass))
13311 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13313 mini_emit_initobj (cfg, *sp, NULL, klass);
13317 case CEE_CONSTRAINED_:
13319 token = read32 (ip + 2);
13320 constrained_class = mini_get_class (method, token, generic_context);
13321 CHECK_TYPELOAD (constrained_class);
13325 case CEE_INITBLK: {
13326 MonoInst *iargs [3];
13330 /* Skip optimized paths for volatile operations. */
13331 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13332 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13333 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13334 /* emit_memset only works when val == 0 */
13335 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13338 iargs [0] = sp [0];
13339 iargs [1] = sp [1];
13340 iargs [2] = sp [2];
13341 if (ip [1] == CEE_CPBLK) {
13343 * FIXME: It's unclear whether we should be emitting both the acquire
13344 * and release barriers for cpblk. It is technically both a load and
13345 * store operation, so it seems like that's the sensible thing to do.
13347 * FIXME: We emit full barriers on both sides of the operation for
13348 * simplicity. We should have a separate atomic memcpy method instead.
13350 MonoMethod *memcpy_method = get_memcpy_method ();
13352 if (ins_flag & MONO_INST_VOLATILE)
13353 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13355 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13356 call->flags |= ins_flag;
13358 if (ins_flag & MONO_INST_VOLATILE)
13359 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13361 MonoMethod *memset_method = get_memset_method ();
13362 if (ins_flag & MONO_INST_VOLATILE) {
13363 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13364 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13366 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13367 call->flags |= ins_flag;
13378 ins_flag |= MONO_INST_NOTYPECHECK;
13380 ins_flag |= MONO_INST_NORANGECHECK;
13381 /* we ignore the no-nullcheck for now since we
13382 * really do it explicitly only when doing callvirt->call
13386 case CEE_RETHROW: {
13388 int handler_offset = -1;
13390 for (i = 0; i < header->num_clauses; ++i) {
13391 MonoExceptionClause *clause = &header->clauses [i];
13392 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13393 handler_offset = clause->handler_offset;
13398 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13400 if (handler_offset == -1)
13403 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13404 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13405 ins->sreg1 = load->dreg;
13406 MONO_ADD_INS (cfg->cbb, ins);
13408 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13409 MONO_ADD_INS (cfg->cbb, ins);
13412 link_bblock (cfg, cfg->cbb, end_bblock);
13413 start_new_bblock = 1;
13421 CHECK_STACK_OVF (1);
13423 token = read32 (ip + 2);
13424 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13425 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13428 val = mono_type_size (type, &ialign);
13430 MonoClass *klass = mini_get_class (method, token, generic_context);
13431 CHECK_TYPELOAD (klass);
13433 val = mono_type_size (&klass->byval_arg, &ialign);
13435 if (mini_is_gsharedvt_klass (klass))
13436 GSHAREDVT_FAILURE (*ip);
13438 EMIT_NEW_ICONST (cfg, ins, val);
13443 case CEE_REFANYTYPE: {
13444 MonoInst *src_var, *src;
13446 GSHAREDVT_FAILURE (*ip);
13452 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13454 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13455 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13456 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13461 case CEE_READONLY_:
13474 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13484 g_warning ("opcode 0x%02x not handled", *ip);
13488 if (start_new_bblock != 1)
13491 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13492 if (cfg->cbb->next_bb) {
13493 /* This could already be set because of inlining, #693905 */
13494 MonoBasicBlock *bb = cfg->cbb;
13496 while (bb->next_bb)
13498 bb->next_bb = end_bblock;
13500 cfg->cbb->next_bb = end_bblock;
13503 if (cfg->method == method && cfg->domainvar) {
13505 MonoInst *get_domain;
13507 cfg->cbb = init_localsbb;
13509 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13510 MONO_ADD_INS (cfg->cbb, get_domain);
13512 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13514 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13515 MONO_ADD_INS (cfg->cbb, store);
13518 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13519 if (cfg->compile_aot)
13520 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13521 mono_get_got_var (cfg);
13524 if (cfg->method == method && cfg->got_var)
13525 mono_emit_load_got_addr (cfg);
13527 if (init_localsbb) {
13528 cfg->cbb = init_localsbb;
13530 for (i = 0; i < header->num_locals; ++i) {
13531 emit_init_local (cfg, i, header->locals [i], init_locals);
13535 if (cfg->init_ref_vars && cfg->method == method) {
13536 /* Emit initialization for ref vars */
13537 // FIXME: Avoid duplication initialization for IL locals.
13538 for (i = 0; i < cfg->num_varinfo; ++i) {
13539 MonoInst *ins = cfg->varinfo [i];
13541 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13542 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13546 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13547 cfg->cbb = init_localsbb;
13548 emit_push_lmf (cfg);
13551 cfg->cbb = init_localsbb;
13552 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13555 MonoBasicBlock *bb;
13558 * Make seq points at backward branch targets interruptable.
13560 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13561 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13562 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13565 /* Add a sequence point for method entry/exit events */
13566 if (seq_points && cfg->gen_sdb_seq_points) {
13567 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13568 MONO_ADD_INS (init_localsbb, ins);
13569 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13570 MONO_ADD_INS (cfg->bb_exit, ins);
13574 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13575 * the code they refer to was dead (#11880).
13577 if (sym_seq_points) {
13578 for (i = 0; i < header->code_size; ++i) {
13579 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13582 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13583 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13590 if (cfg->method == method) {
13591 MonoBasicBlock *bb;
13592 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13593 bb->region = mono_find_block_region (cfg, bb->real_offset);
13595 mono_create_spvar_for_region (cfg, bb->region);
13596 if (cfg->verbose_level > 2)
13597 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13600 MonoBasicBlock *bb;
13601 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13602 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13603 bb->real_offset = inline_offset;
13607 if (inline_costs < 0) {
13610 /* Method is too large */
13611 mname = mono_method_full_name (method, TRUE);
13612 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13616 if ((cfg->verbose_level > 2) && (cfg->method == method))
13617 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13622 g_assert (!mono_error_ok (&cfg->error));
13626 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13630 set_exception_type_from_invalid_il (cfg, method, ip);
13634 g_slist_free (class_inits);
13635 mono_basic_block_free (original_bb);
13636 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13637 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13638 if (cfg->exception_type)
13641 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store opcode (OP_STORE*_MEMBASE_REG) to its
 * immediate-source counterpart (OP_STORE*_MEMBASE_IMM), so that a constant
 * value being stored can be folded directly into the store instruction.
 * Aborts on any opcode that has no immediate form.
 */
13645 store_membase_reg_to_store_membase_imm (int opcode)
13648 case OP_STORE_MEMBASE_REG:
13649 return OP_STORE_MEMBASE_IMM;
13650 case OP_STOREI1_MEMBASE_REG:
13651 return OP_STOREI1_MEMBASE_IMM;
13652 case OP_STOREI2_MEMBASE_REG:
13653 return OP_STOREI2_MEMBASE_IMM;
13654 case OP_STOREI4_MEMBASE_REG:
13655 return OP_STOREI4_MEMBASE_IMM;
13656 case OP_STOREI8_MEMBASE_REG:
13657 return OP_STOREI8_MEMBASE_IMM;
/* Opcode without an immediate counterpart: caller must not pass one. */
13659 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode whose second operand is a register to the variant taking
 * an immediate operand instead (e.g. an add becomes OP_IADD_IMM).  Used to
 * fold constant operands into the instruction itself.
 */
13666 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU and shift operations */
13670 return OP_IADD_IMM;
13672 return OP_ISUB_IMM;
13674 return OP_IDIV_IMM;
13676 return OP_IDIV_UN_IMM;
13678 return OP_IREM_IMM;
13680 return OP_IREM_UN_IMM;
13682 return OP_IMUL_IMM;
13684 return OP_IAND_IMM;
13688 return OP_IXOR_IMM;
13690 return OP_ISHL_IMM;
13692 return OP_ISHR_IMM;
13694 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU and shift operations */
13697 return OP_LADD_IMM;
13699 return OP_LSUB_IMM;
13701 return OP_LAND_IMM;
13705 return OP_LXOR_IMM;
13707 return OP_LSHL_IMM;
13709 return OP_LSHR_IMM;
13711 return OP_LSHR_UN_IMM;
/* Long rem-by-immediate only exists when longs fit in one register. */
13712 #if SIZEOF_REGISTER == 8
13714 return OP_LREM_IMM;
/* Compare operations */
13718 return OP_COMPARE_IMM;
13720 return OP_ICOMPARE_IMM;
13722 return OP_LCOMPARE_IMM;
/* Stores of constant values */
13724 case OP_STORE_MEMBASE_REG:
13725 return OP_STORE_MEMBASE_IMM;
13726 case OP_STOREI1_MEMBASE_REG:
13727 return OP_STOREI1_MEMBASE_IMM;
13728 case OP_STOREI2_MEMBASE_REG:
13729 return OP_STOREI2_MEMBASE_IMM;
13730 case OP_STOREI4_MEMBASE_REG:
13731 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific opcodes with immediate forms */
13733 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13735 return OP_X86_PUSH_IMM;
13736 case OP_X86_COMPARE_MEMBASE_REG:
13737 return OP_X86_COMPARE_MEMBASE_IMM;
13739 #if defined(TARGET_AMD64)
13740 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13741 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13743 case OP_VOIDCALL_REG:
13744 return OP_VOIDCALL;
13752 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CEE_LDIND_* CIL indirect-load opcode into the corresponding
 * OP_LOAD*_MEMBASE IR load opcode.  Note that object references
 * (CEE_LDIND_REF) and native ints both map to the pointer-sized
 * OP_LOAD_MEMBASE.  Aborts on an opcode that is not an ldind.
 */
13759 ldind_to_load_membase (int opcode)
13763 return OP_LOADI1_MEMBASE;
13765 return OP_LOADU1_MEMBASE;
13767 return OP_LOADI2_MEMBASE;
13769 return OP_LOADU2_MEMBASE;
13771 return OP_LOADI4_MEMBASE;
13773 return OP_LOADU4_MEMBASE;
13775 return OP_LOAD_MEMBASE;
13776 case CEE_LDIND_REF:
13777 return OP_LOAD_MEMBASE;
13779 return OP_LOADI8_MEMBASE;
13781 return OP_LOADR4_MEMBASE;
13783 return OP_LOADR8_MEMBASE;
13785 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CEE_STIND_* CIL indirect-store opcode into the corresponding
 * OP_STORE*_MEMBASE_REG IR store opcode.  Object references (CEE_STIND_REF)
 * map to the pointer-sized OP_STORE_MEMBASE_REG.  Aborts on an opcode that
 * is not an stind.
 */
13792 stind_to_store_membase (int opcode)
13796 return OP_STOREI1_MEMBASE_REG;
13798 return OP_STOREI2_MEMBASE_REG;
13800 return OP_STOREI4_MEMBASE_REG;
13802 case CEE_STIND_REF:
13803 return OP_STORE_MEMBASE_REG;
13805 return OP_STOREI8_MEMBASE_REG;
13807 return OP_STORER4_MEMBASE_REG;
13809 return OP_STORER8_MEMBASE_REG;
13811 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), which only x86/amd64 provide here.  On other targets, or
 * for opcodes with no _MEM form, the (elided) fallthrough presumably returns
 * a sentinel — TODO confirm against the full source.
 */
13818 mono_load_membase_to_load_mem (int opcode)
13820 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13821 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13823 case OP_LOAD_MEMBASE:
13824 return OP_LOAD_MEM;
13825 case OP_LOADU1_MEMBASE:
13826 return OP_LOADU1_MEM;
13827 case OP_LOADU2_MEMBASE:
13828 return OP_LOADU2_MEM;
13829 case OP_LOADI4_MEMBASE:
13830 return OP_LOADI4_MEM;
13831 case OP_LOADU4_MEMBASE:
13832 return OP_LOADU4_MEM;
/* 64-bit loads only have a _MEM form when the register is 64-bit wide. */
13833 #if SIZEOF_REGISTER == 8
13834 case OP_LOADI8_MEMBASE:
13835 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result is written back to a stack slot
 * via STORE_OPCODE, return an x86/amd64 read-modify-write opcode that
 * performs the operation directly on the destination memory location,
 * avoiding the separate load+op+store sequence.  Callers treat -1 as
 * "no fusable form" (see the != -1 checks at the call sites).
 */
13844 op_to_op_dest_membase (int store_opcode, int opcode)
13846 #if defined(TARGET_X86)
/* Only pointer-sized / 32-bit stores can be fused on x86. */
13847 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13852 return OP_X86_ADD_MEMBASE_REG;
13854 return OP_X86_SUB_MEMBASE_REG;
13856 return OP_X86_AND_MEMBASE_REG;
13858 return OP_X86_OR_MEMBASE_REG;
13860 return OP_X86_XOR_MEMBASE_REG;
13863 return OP_X86_ADD_MEMBASE_IMM;
13866 return OP_X86_SUB_MEMBASE_IMM;
13869 return OP_X86_AND_MEMBASE_IMM;
13872 return OP_X86_OR_MEMBASE_IMM;
13875 return OP_X86_XOR_MEMBASE_IMM;
13881 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64-bit stores. */
13882 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13887 return OP_X86_ADD_MEMBASE_REG;
13889 return OP_X86_SUB_MEMBASE_REG;
13891 return OP_X86_AND_MEMBASE_REG;
13893 return OP_X86_OR_MEMBASE_REG;
13895 return OP_X86_XOR_MEMBASE_REG;
13897 return OP_X86_ADD_MEMBASE_IMM;
13899 return OP_X86_SUB_MEMBASE_IMM;
13901 return OP_X86_AND_MEMBASE_IMM;
13903 return OP_X86_OR_MEMBASE_IMM;
13905 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit-wide variants */
13907 return OP_AMD64_ADD_MEMBASE_REG;
13909 return OP_AMD64_SUB_MEMBASE_REG;
13911 return OP_AMD64_AND_MEMBASE_REG;
13913 return OP_AMD64_OR_MEMBASE_REG;
13915 return OP_AMD64_XOR_MEMBASE_REG;
13918 return OP_AMD64_ADD_MEMBASE_IMM;
13921 return OP_AMD64_SUB_MEMBASE_IMM;
13924 return OP_AMD64_AND_MEMBASE_IMM;
13927 return OP_AMD64_OR_MEMBASE_IMM;
13930 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store into a single
 * x86/amd64 SETcc-to-memory instruction.  Only byte stores
 * (OP_STOREI1_MEMBASE_REG) qualify, matching the 8-bit result the x86 SETcc
 * forms produce.  The selecting case labels are elided in this view —
 * presumably the equality/inequality compare-result ops; confirm against the
 * full source.  Callers treat -1 as "no fusable form".
 */
13940 op_to_op_store_membase (int store_opcode, int opcode)
13942 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13945 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13946 return OP_X86_SETEQ_MEMBASE;
13948 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13949 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the first source register of OPCODE
 * into a single x86/amd64 instruction that reads that operand directly from
 * memory.  Callers treat -1 as "no fusable form".
 */
13957 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13960 /* FIXME: This has sign extension issues */
13962 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13963 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Other fusions require a full 32-bit / pointer-sized load. */
13966 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13971 return OP_X86_PUSH_MEMBASE;
13972 case OP_COMPARE_IMM:
13973 case OP_ICOMPARE_IMM:
13974 return OP_X86_COMPARE_MEMBASE_IMM;
13977 return OP_X86_COMPARE_MEMBASE_REG;
13981 #ifdef TARGET_AMD64
13982 /* FIXME: This has sign extension issues */
13984 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13985 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) OP_LOAD_MEMBASE is only 32 bits wide. */
13990 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13991 return OP_X86_PUSH_MEMBASE;
13993 /* FIXME: This only works for 32 bit immediates
13994 case OP_COMPARE_IMM:
13995 case OP_LCOMPARE_IMM:
13996 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13997 return OP_AMD64_COMPARE_MEMBASE_IMM;
13999 case OP_ICOMPARE_IMM:
14000 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14001 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pick the 32- or 64-bit compare form matching the width of the load. */
14005 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14006 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14007 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14008 return OP_AMD64_COMPARE_MEMBASE_REG;
14011 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14012 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the second source register of OPCODE
 * into a single x86/amd64 reg-op-memory instruction.  Callers treat -1 as
 * "no fusable form".
 */
14021 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only full 32-bit / pointer-sized loads can be fused. */
14024 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14030 return OP_X86_COMPARE_REG_MEMBASE;
14032 return OP_X86_ADD_REG_MEMBASE;
14034 return OP_X86_SUB_REG_MEMBASE;
14036 return OP_X86_AND_REG_MEMBASE;
14038 return OP_X86_OR_REG_MEMBASE;
14040 return OP_X86_XOR_REG_MEMBASE;
14044 #ifdef TARGET_AMD64
/* amd64: choose 32-bit forms for 4-byte loads (and ILP32 pointer loads)... */
14045 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14048 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14050 return OP_X86_ADD_REG_MEMBASE;
14052 return OP_X86_SUB_REG_MEMBASE;
14054 return OP_X86_AND_REG_MEMBASE;
14056 return OP_X86_OR_REG_MEMBASE;
14058 return OP_X86_XOR_REG_MEMBASE;
/* ...and 64-bit forms for 8-byte loads (and LP64 pointer loads). */
14060 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14064 return OP_AMD64_COMPARE_REG_MEMBASE;
14066 return OP_AMD64_ADD_REG_MEMBASE;
14068 return OP_AMD64_SUB_REG_MEMBASE;
14070 return OP_AMD64_AND_REG_MEMBASE;
14072 return OP_AMD64_OR_REG_MEMBASE;
14074 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but rejects the conversion (the elided case
 * bodies presumably return -1 — TODO confirm) for opcodes that the current
 * target emulates in software: long shifts on 32-bit registers, and
 * mul/div/rem where MONO_ARCH_EMULATE_* is defined.  Everything else is
 * delegated to mono_op_to_op_imm ().
 */
14083 mono_op_to_op_imm_noemul (int opcode)
14086 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14092 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14099 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14104 return mono_op_to_op_imm (opcode);
14109 * mono_handle_global_vregs:
14111 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14115 mono_handle_global_vregs (MonoCompile *cfg)
14117 gint32 *vreg_to_bb;
14118 MonoBasicBlock *bb;
/*
 * Per-vreg bookkeeping: 0 = not seen yet, block_num + 1 = seen only in that
 * bblock, -1 = seen in multiple bblocks.
 * NOTE(review): the element size here is sizeof (gint32*) although the array
 * is of gint32, and the "+ 1" adds one byte rather than one element — this
 * over-allocates harmlessly on 64-bit but looks unintended; confirm.
 */
14121 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14123 #ifdef MONO_ARCH_SIMD_INTRINSICS
14124 if (cfg->uses_simd_intrinsics)
14125 mono_simd_simplify_indirection (cfg);
14128 /* Find local vregs used in more than one bb */
14129 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14130 MonoInst *ins = bb->code;
14131 int block_num = bb->block_num;
14133 if (cfg->verbose_level > 2)
14134 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14137 for (; ins; ins = ins->next) {
14138 const char *spec = INS_INFO (ins->opcode);
14139 int regtype = 0, regindex;
14142 if (G_UNLIKELY (cfg->verbose_level > 2))
14143 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
14145 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg then sreg1..sreg3 of the instruction. */
14147 for (regindex = 0; regindex < 4; regindex ++) {
14150 if (regindex == 0) {
14151 regtype = spec [MONO_INST_DEST];
14152 if (regtype == ' ')
14155 } else if (regindex == 1) {
14156 regtype = spec [MONO_INST_SRC1];
14157 if (regtype == ' ')
14160 } else if (regindex == 2) {
14161 regtype = spec [MONO_INST_SRC2];
14162 if (regtype == ' ')
14165 } else if (regindex == 3) {
14166 regtype = spec [MONO_INST_SRC3];
14167 if (regtype == ' ')
14172 #if SIZEOF_REGISTER == 4
14173 /* In the LLVM case, the long opcodes are not decomposed */
14174 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14176 * Since some instructions reference the original long vreg,
14177 * and some reference the two component vregs, it is quite hard
14178 * to determine when it needs to be global. So be conservative.
14180 if (!get_vreg_to_inst (cfg, vreg)) {
14181 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14183 if (cfg->verbose_level > 2)
14184 printf ("LONG VREG R%d made global.\n", vreg);
14188 * Make the component vregs volatile since the optimizations can
14189 * get confused otherwise.
14191 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14192 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14196 g_assert (vreg != -1);
14198 prev_bb = vreg_to_bb [vreg];
14199 if (prev_bb == 0) {
14200 /* 0 is a valid block num */
14201 vreg_to_bb [vreg] = block_num + 1;
14202 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Physical registers (below MONO_MAX_*REGS) are exempt from globalization. */
14203 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* First time this vreg is seen crossing a bblock boundary: make it a var. */
14206 if (!get_vreg_to_inst (cfg, vreg)) {
14207 if (G_UNLIKELY (cfg->verbose_level > 2))
14208 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the register class of the vreg. */
14212 if (vreg_is_ref (cfg, vreg))
14213 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14215 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14218 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14221 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14224 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14227 g_assert_not_reached ();
14231 /* Flag as having been used in more than one bb */
14232 vreg_to_bb [vreg] = -1;
14238 /* If a variable is used in only one bblock, convert it into a local vreg */
14239 for (i = 0; i < cfg->num_varinfo; i++) {
14240 MonoInst *var = cfg->varinfo [i];
14241 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14243 switch (var->type) {
14249 #if SIZEOF_REGISTER == 8
14252 #if !defined(TARGET_X86)
14253 /* Enabling this screws up the fp stack on x86 */
14256 if (mono_arch_is_soft_float ())
14260 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14264 /* Arguments are implicitly global */
14265 /* Putting R4 vars into registers doesn't work currently */
14266 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14267 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14269 * Make that the variable's liveness interval doesn't contain a call, since
14270 * that would cause the lvreg to be spilled, making the whole optimization
14273 /* This is too slow for JIT compilation */
/*
 * NOTE(review): the scan below indexes vreg_to_bb as if it held
 * MonoBasicBlock* although it is declared gint32* — presumably this
 * section is compiled out (e.g. guarded by an elided #if 0); confirm
 * against the full source.
 */
14275 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14277 int def_index, call_index, ins_index;
14278 gboolean spilled = FALSE;
14283 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14284 const char *spec = INS_INFO (ins->opcode);
14286 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14287 def_index = ins_index;
/*
 * NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * half was likely meant to test SRC2/sreg2; confirm before relying
 * on this path.
 */
14289 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14290 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14291 if (call_index > def_index) {
14297 if (MONO_IS_CALL (ins))
14298 call_index = ins_index;
/* Single-bblock, call-free variable: demote it back to a plain vreg. */
14308 if (G_UNLIKELY (cfg->verbose_level > 2))
14309 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14310 var->flags |= MONO_INST_IS_DEAD;
14311 cfg->vreg_to_inst [var->dreg] = NULL;
14318 * Compress the varinfo and vars tables so the liveness computation is faster and
14319 * takes up less space.
14322 for (i = 0; i < cfg->num_varinfo; ++i) {
14323 MonoInst *var = cfg->varinfo [i];
14324 if (pos < i && cfg->locals_start == i)
14325 cfg->locals_start = pos;
14326 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Keep live entries, sliding them down to index 'pos'. */
14328 cfg->varinfo [pos] = cfg->varinfo [i];
14329 cfg->varinfo [pos]->inst_c0 = pos;
14330 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14331 cfg->vars [pos].idx = pos;
14332 #if SIZEOF_REGISTER == 4
14333 if (cfg->varinfo [pos]->type == STACK_I8) {
14334 /* Modify the two component vars too */
14337 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14338 var1->inst_c0 = pos;
14339 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14340 var1->inst_c0 = pos;
14347 cfg->num_varinfo = pos;
14348 if (cfg->locals_start > cfg->num_varinfo)
14349 cfg->locals_start = cfg->num_varinfo;
14353 * mono_allocate_gsharedvt_vars:
14355 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14356 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14359 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Zero-initialized: an entry of 0 means "not a gsharedvt vreg". */
14363 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14365 for (i = 0; i < cfg->num_varinfo; ++i) {
14366 MonoInst *ins = cfg->varinfo [i];
14369 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a runtime-info slot; the mapping stores idx + 1 so 0 stays free. */
14370 if (i >= cfg->locals_start) {
14372 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14373 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14374 ins->opcode = OP_GSHAREDVT_LOCAL;
14375 ins->inst_imm = idx;
/* Arguments are marked with -1 (passed by reference, no slot needed). */
14378 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14379 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14386 * mono_spill_global_vars:
14388 * Generate spill code for variables which are not allocated to registers,
14389 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14390 * code is generated which could be optimized by the local optimization passes.
14393 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14395 MonoBasicBlock *bb;
14397 int orig_next_vreg;
14398 guint32 *vreg_to_lvreg;
14400 guint32 i, lvregs_len;
14401 gboolean dest_has_lvreg = FALSE;
14402 MonoStackType stacktypes [128];
14403 MonoInst **live_range_start, **live_range_end;
14404 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14406 *need_local_opts = FALSE;
14408 memset (spec2, 0, sizeof (spec2));
14410 /* FIXME: Move this function to mini.c */
14411 stacktypes ['i'] = STACK_PTR;
14412 stacktypes ['l'] = STACK_I8;
14413 stacktypes ['f'] = STACK_R8;
14414 #ifdef MONO_ARCH_SIMD_INTRINSICS
14415 stacktypes ['x'] = STACK_VTYPE;
14418 #if SIZEOF_REGISTER == 4
14419 /* Create MonoInsts for longs */
14420 for (i = 0; i < cfg->num_varinfo; i++) {
14421 MonoInst *ins = cfg->varinfo [i];
14423 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14424 switch (ins->type) {
14429 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14432 g_assert (ins->opcode == OP_REGOFFSET);
14434 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14436 tree->opcode = OP_REGOFFSET;
14437 tree->inst_basereg = ins->inst_basereg;
14438 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14440 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14442 tree->opcode = OP_REGOFFSET;
14443 tree->inst_basereg = ins->inst_basereg;
14444 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14454 if (cfg->compute_gc_maps) {
14455 /* registers need liveness info even for !non refs */
14456 for (i = 0; i < cfg->num_varinfo; i++) {
14457 MonoInst *ins = cfg->varinfo [i];
14459 if (ins->opcode == OP_REGVAR)
14460 ins->flags |= MONO_INST_GC_TRACK;
14464 /* FIXME: widening and truncation */
14467 * As an optimization, when a variable allocated to the stack is first loaded into
14468 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14469 * the variable again.
14471 orig_next_vreg = cfg->next_vreg;
14472 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14473 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14477 * These arrays contain the first and last instructions accessing a given
14479 * Since we emit bblocks in the same order we process them here, and we
14480 * don't split live ranges, these will precisely describe the live range of
14481 * the variable, i.e. the instruction range where a valid value can be found
14482 * in the variables location.
14483 * The live range is computed using the liveness info computed by the liveness pass.
14484 * We can't use vmv->range, since that is an abstract live range, and we need
14485 * one which is instruction precise.
14486 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14488 /* FIXME: Only do this if debugging info is requested */
14489 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14490 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14491 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14492 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14494 /* Add spill loads/stores */
14495 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14498 if (cfg->verbose_level > 2)
14499 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14501 /* Clear vreg_to_lvreg array */
14502 for (i = 0; i < lvregs_len; i++)
14503 vreg_to_lvreg [lvregs [i]] = 0;
14507 MONO_BB_FOR_EACH_INS (bb, ins) {
14508 const char *spec = INS_INFO (ins->opcode);
14509 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14510 gboolean store, no_lvreg;
14511 int sregs [MONO_MAX_SRC_REGS];
14513 if (G_UNLIKELY (cfg->verbose_level > 2))
14514 mono_print_ins (ins);
14516 if (ins->opcode == OP_NOP)
14520 * We handle LDADDR here as well, since it can only be decomposed
14521 * when variable addresses are known.
14523 if (ins->opcode == OP_LDADDR) {
14524 MonoInst *var = (MonoInst *)ins->inst_p0;
14526 if (var->opcode == OP_VTARG_ADDR) {
14527 /* Happens on SPARC/S390 where vtypes are passed by reference */
14528 MonoInst *vtaddr = var->inst_left;
14529 if (vtaddr->opcode == OP_REGVAR) {
14530 ins->opcode = OP_MOVE;
14531 ins->sreg1 = vtaddr->dreg;
14533 else if (var->inst_left->opcode == OP_REGOFFSET) {
14534 ins->opcode = OP_LOAD_MEMBASE;
14535 ins->inst_basereg = vtaddr->inst_basereg;
14536 ins->inst_offset = vtaddr->inst_offset;
14539 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14540 /* gsharedvt arg passed by ref */
14541 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14543 ins->opcode = OP_LOAD_MEMBASE;
14544 ins->inst_basereg = var->inst_basereg;
14545 ins->inst_offset = var->inst_offset;
14546 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14547 MonoInst *load, *load2, *load3;
14548 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14549 int reg1, reg2, reg3;
14550 MonoInst *info_var = cfg->gsharedvt_info_var;
14551 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14555 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14558 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14560 g_assert (info_var);
14561 g_assert (locals_var);
14563 /* Mark the instruction used to compute the locals var as used */
14564 cfg->gsharedvt_locals_var_ins = NULL;
14566 /* Load the offset */
14567 if (info_var->opcode == OP_REGOFFSET) {
14568 reg1 = alloc_ireg (cfg);
14569 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14570 } else if (info_var->opcode == OP_REGVAR) {
14572 reg1 = info_var->dreg;
14574 g_assert_not_reached ();
14576 reg2 = alloc_ireg (cfg);
14577 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14578 /* Load the locals area address */
14579 reg3 = alloc_ireg (cfg);
14580 if (locals_var->opcode == OP_REGOFFSET) {
14581 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14582 } else if (locals_var->opcode == OP_REGVAR) {
14583 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14585 g_assert_not_reached ();
14587 /* Compute the address */
14588 ins->opcode = OP_PADD;
14592 mono_bblock_insert_before_ins (bb, ins, load3);
14593 mono_bblock_insert_before_ins (bb, load3, load2);
14595 mono_bblock_insert_before_ins (bb, load2, load);
14597 g_assert (var->opcode == OP_REGOFFSET);
14599 ins->opcode = OP_ADD_IMM;
14600 ins->sreg1 = var->inst_basereg;
14601 ins->inst_imm = var->inst_offset;
14604 *need_local_opts = TRUE;
14605 spec = INS_INFO (ins->opcode);
14608 if (ins->opcode < MONO_CEE_LAST) {
14609 mono_print_ins (ins);
14610 g_assert_not_reached ();
14614 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14618 if (MONO_IS_STORE_MEMBASE (ins)) {
14619 tmp_reg = ins->dreg;
14620 ins->dreg = ins->sreg2;
14621 ins->sreg2 = tmp_reg;
14624 spec2 [MONO_INST_DEST] = ' ';
14625 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14626 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14627 spec2 [MONO_INST_SRC3] = ' ';
14629 } else if (MONO_IS_STORE_MEMINDEX (ins))
14630 g_assert_not_reached ();
14635 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14636 printf ("\t %.3s %d", spec, ins->dreg);
14637 num_sregs = mono_inst_get_src_registers (ins, sregs);
14638 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14639 printf (" %d", sregs [srcindex]);
14646 regtype = spec [MONO_INST_DEST];
14647 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14650 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14651 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14652 MonoInst *store_ins;
14654 MonoInst *def_ins = ins;
14655 int dreg = ins->dreg; /* The original vreg */
14657 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14659 if (var->opcode == OP_REGVAR) {
14660 ins->dreg = var->dreg;
14661 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14663 * Instead of emitting a load+store, use a _membase opcode.
14665 g_assert (var->opcode == OP_REGOFFSET);
14666 if (ins->opcode == OP_MOVE) {
14670 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14671 ins->inst_basereg = var->inst_basereg;
14672 ins->inst_offset = var->inst_offset;
14675 spec = INS_INFO (ins->opcode);
14679 g_assert (var->opcode == OP_REGOFFSET);
14681 prev_dreg = ins->dreg;
14683 /* Invalidate any previous lvreg for this vreg */
14684 vreg_to_lvreg [ins->dreg] = 0;
14688 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14690 store_opcode = OP_STOREI8_MEMBASE_REG;
14693 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14695 #if SIZEOF_REGISTER != 8
14696 if (regtype == 'l') {
14697 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14698 mono_bblock_insert_after_ins (bb, ins, store_ins);
14699 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14700 mono_bblock_insert_after_ins (bb, ins, store_ins);
14701 def_ins = store_ins;
14706 g_assert (store_opcode != OP_STOREV_MEMBASE);
14708 /* Try to fuse the store into the instruction itself */
14709 /* FIXME: Add more instructions */
14710 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14711 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14712 ins->inst_imm = ins->inst_c0;
14713 ins->inst_destbasereg = var->inst_basereg;
14714 ins->inst_offset = var->inst_offset;
14715 spec = INS_INFO (ins->opcode);
14716 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14717 ins->opcode = store_opcode;
14718 ins->inst_destbasereg = var->inst_basereg;
14719 ins->inst_offset = var->inst_offset;
14723 tmp_reg = ins->dreg;
14724 ins->dreg = ins->sreg2;
14725 ins->sreg2 = tmp_reg;
14728 spec2 [MONO_INST_DEST] = ' ';
14729 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14730 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14731 spec2 [MONO_INST_SRC3] = ' ';
14733 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14734 // FIXME: The backends expect the base reg to be in inst_basereg
14735 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14737 ins->inst_basereg = var->inst_basereg;
14738 ins->inst_offset = var->inst_offset;
14739 spec = INS_INFO (ins->opcode);
14741 /* printf ("INS: "); mono_print_ins (ins); */
14742 /* Create a store instruction */
14743 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14745 /* Insert it after the instruction */
14746 mono_bblock_insert_after_ins (bb, ins, store_ins);
14748 def_ins = store_ins;
14751 * We can't assign ins->dreg to var->dreg here, since the
14752 * sregs could use it. So set a flag, and do it after
14755 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14756 dest_has_lvreg = TRUE;
14761 if (def_ins && !live_range_start [dreg]) {
14762 live_range_start [dreg] = def_ins;
14763 live_range_start_bb [dreg] = bb;
14766 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14769 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14770 tmp->inst_c1 = dreg;
14771 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14778 num_sregs = mono_inst_get_src_registers (ins, sregs);
14779 for (srcindex = 0; srcindex < 3; ++srcindex) {
14780 regtype = spec [MONO_INST_SRC1 + srcindex];
14781 sreg = sregs [srcindex];
14783 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14784 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14785 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14786 MonoInst *use_ins = ins;
14787 MonoInst *load_ins;
14788 guint32 load_opcode;
14790 if (var->opcode == OP_REGVAR) {
14791 sregs [srcindex] = var->dreg;
14792 //mono_inst_set_src_registers (ins, sregs);
14793 live_range_end [sreg] = use_ins;
14794 live_range_end_bb [sreg] = bb;
14796 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14799 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14800 /* var->dreg is a hreg */
14801 tmp->inst_c1 = sreg;
14802 mono_bblock_insert_after_ins (bb, ins, tmp);
14808 g_assert (var->opcode == OP_REGOFFSET);
14810 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14812 g_assert (load_opcode != OP_LOADV_MEMBASE);
14814 if (vreg_to_lvreg [sreg]) {
14815 g_assert (vreg_to_lvreg [sreg] != -1);
14817 /* The variable is already loaded to an lvreg */
14818 if (G_UNLIKELY (cfg->verbose_level > 2))
14819 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14820 sregs [srcindex] = vreg_to_lvreg [sreg];
14821 //mono_inst_set_src_registers (ins, sregs);
14825 /* Try to fuse the load into the instruction */
14826 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14827 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14828 sregs [0] = var->inst_basereg;
14829 //mono_inst_set_src_registers (ins, sregs);
14830 ins->inst_offset = var->inst_offset;
14831 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14832 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14833 sregs [1] = var->inst_basereg;
14834 //mono_inst_set_src_registers (ins, sregs);
14835 ins->inst_offset = var->inst_offset;
14837 if (MONO_IS_REAL_MOVE (ins)) {
14838 ins->opcode = OP_NOP;
14841 //printf ("%d ", srcindex); mono_print_ins (ins);
14843 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14845 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14846 if (var->dreg == prev_dreg) {
14848 * sreg refers to the value loaded by the load
14849 * emitted below, but we need to use ins->dreg
14850 * since it refers to the store emitted earlier.
14854 g_assert (sreg != -1);
14855 vreg_to_lvreg [var->dreg] = sreg;
14856 g_assert (lvregs_len < 1024);
14857 lvregs [lvregs_len ++] = var->dreg;
14861 sregs [srcindex] = sreg;
14862 //mono_inst_set_src_registers (ins, sregs);
14864 #if SIZEOF_REGISTER != 8
14865 if (regtype == 'l') {
14866 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14867 mono_bblock_insert_before_ins (bb, ins, load_ins);
14868 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14869 mono_bblock_insert_before_ins (bb, ins, load_ins);
14870 use_ins = load_ins;
14875 #if SIZEOF_REGISTER == 4
14876 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14878 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14879 mono_bblock_insert_before_ins (bb, ins, load_ins);
14880 use_ins = load_ins;
14884 if (var->dreg < orig_next_vreg) {
14885 live_range_end [var->dreg] = use_ins;
14886 live_range_end_bb [var->dreg] = bb;
14889 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14892 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14893 tmp->inst_c1 = var->dreg;
14894 mono_bblock_insert_after_ins (bb, ins, tmp);
14898 mono_inst_set_src_registers (ins, sregs);
14900 if (dest_has_lvreg) {
14901 g_assert (ins->dreg != -1);
14902 vreg_to_lvreg [prev_dreg] = ins->dreg;
14903 g_assert (lvregs_len < 1024);
14904 lvregs [lvregs_len ++] = prev_dreg;
14905 dest_has_lvreg = FALSE;
14909 tmp_reg = ins->dreg;
14910 ins->dreg = ins->sreg2;
14911 ins->sreg2 = tmp_reg;
14914 if (MONO_IS_CALL (ins)) {
14915 /* Clear vreg_to_lvreg array */
14916 for (i = 0; i < lvregs_len; i++)
14917 vreg_to_lvreg [lvregs [i]] = 0;
14919 } else if (ins->opcode == OP_NOP) {
14921 MONO_INST_NULLIFY_SREGS (ins);
14924 if (cfg->verbose_level > 2)
14925 mono_print_ins_index (1, ins);
14928 /* Extend the live range based on the liveness info */
14929 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14930 for (i = 0; i < cfg->num_varinfo; i ++) {
14931 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14933 if (vreg_is_volatile (cfg, vi->vreg))
14934 /* The liveness info is incomplete */
14937 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14938 /* Live from at least the first ins of this bb */
14939 live_range_start [vi->vreg] = bb->code;
14940 live_range_start_bb [vi->vreg] = bb;
14943 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14944 /* Live at least until the last ins of this bb */
14945 live_range_end [vi->vreg] = bb->last_ins;
14946 live_range_end_bb [vi->vreg] = bb;
14953 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14954 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14956 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14957 for (i = 0; i < cfg->num_varinfo; ++i) {
14958 int vreg = MONO_VARINFO (cfg, i)->vreg;
14961 if (live_range_start [vreg]) {
14962 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14964 ins->inst_c1 = vreg;
14965 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14967 if (live_range_end [vreg]) {
14968 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14970 ins->inst_c1 = vreg;
14971 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14972 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14974 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14979 if (cfg->gsharedvt_locals_var_ins) {
14980 /* Nullify if unused */
14981 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14982 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14985 g_free (live_range_start);
14986 g_free (live_range_end);
14987 g_free (live_range_start_bb);
14988 g_free (live_range_end_bb);
14991 static void mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
14993 MonoInst *ret, *move, *source;
14994 MonoClass *klass = ins->klass;
14995 int context_used = mini_class_check_context_used (cfg, klass);
14996 int is_isinst = ins->opcode == OP_ISINST;
14997 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
14998 source = get_vreg_to_inst (cfg, ins->sreg1);
14999 if (!source || source == (MonoInst *) -1)
15000 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15001 g_assert (source && source != (MonoInst *) -1);
15003 MonoBasicBlock *first_bb;
15004 NEW_BBLOCK (cfg, first_bb);
15005 cfg->cbb = first_bb;
15007 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
15009 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15011 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
15012 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
15013 MonoInst *iargs [1];
15016 iargs [0] = source;
15018 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15019 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15021 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
15022 save_cast_details (cfg, klass, source->dreg, TRUE);
15023 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15024 reset_cast_details (cfg);
15026 g_assert (costs > 0);
15030 ret = handle_isinst (cfg, klass, source, context_used);
15032 ret = handle_castclass (cfg, klass, source, context_used);
15034 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
15036 g_assert (cfg->cbb->code || first_bb->code);
15037 MonoInst *prev = ins->prev;
15038 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/*
 * mono_decompose_typechecks:
 *
 *   Walk every instruction of every basic block in CFG and lower each
 * OP_ISINST/OP_CASTCLASS it finds through mono_decompose_typecheck ().
 */
15041 void mono_decompose_typechecks (MonoCompile *cfg)
15043 	for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15045 		MONO_BB_FOR_EACH_INS (bb, c) {
15046 			switch (c->opcode) {
15049 				mono_decompose_typecheck (cfg, bb, c);
15059 * - use 'iadd' instead of 'int_add'
15060 * - handling ovf opcodes: decompose in method_to_ir.
15061 * - unify iregs/fregs
15062 * -> partly done, the missing parts are:
15063 * - a more complete unification would involve unifying the hregs as well, so
15064 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15065 * would no longer map to the machine hregs, so the code generators would need to
15066 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15067 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15068 * fp/non-fp branches speeds it up by about 15%.
15069 * - use sext/zext opcodes instead of shifts
15071 * - get rid of TEMPLOADs if possible and use vregs instead
15072 * - clean up usage of OP_P/OP_ opcodes
15073 * - cleanup usage of DUMMY_USE
15074 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15076 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15077 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15078 * - make sure handle_stack_args () is called before the branch is emitted
15079 * - when the new IR is done, get rid of all unused stuff
15080 * - COMPARE/BEQ as separate instructions or unify them ?
15081 * - keeping them separate allows specialized compare instructions like
15082 * compare_imm, compare_membase
15083 * - most back ends unify fp compare+branch, fp compare+ceq
15084 * - integrate mono_save_args into inline_method
15085  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15086 * - handle long shift opts on 32 bit platforms somehow: they require
15087 * 3 sregs (2 for arg1 and 1 for arg2)
15088 * - make byref a 'normal' type.
15089 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15090 * variable if needed.
15091 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15092 * like inline_method.
15093 * - remove inlining restrictions
15094 * - fix LNEG and enable cfold of INEG
15095 * - generalize x86 optimizations like ldelema as a peephole optimization
15096 * - add store_mem_imm for amd64
15097 * - optimize the loading of the interruption flag in the managed->native wrappers
15098 * - avoid special handling of OP_NOP in passes
15099 * - move code inserting instructions into one function/macro.
15100 * - try a coalescing phase after liveness analysis
15101 * - add float -> vreg conversion + local optimizations on !x86
15102 * - figure out how to handle decomposed branches during optimizations, ie.
15103 * compare+branch, op_jump_table+op_br etc.
15104 * - promote RuntimeXHandles to vregs
15105 * - vtype cleanups:
15106 * - add a NEW_VARLOADA_VREG macro
15107 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15108 * accessing vtype fields.
15109 * - get rid of I8CONST on 64 bit platforms
15110 * - dealing with the increase in code size due to branches created during opcode
15112 * - use extended basic blocks
15113 * - all parts of the JIT
15114 * - handle_global_vregs () && local regalloc
15115 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15116 * - sources of increase in code size:
15119 * - isinst and castclass
15120 * - lvregs not allocated to global registers even if used multiple times
15121 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15123 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15124 * - add all micro optimizations from the old JIT
15125 * - put tree optimizations into the deadce pass
15126 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15127 * specific function.
15128 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15129 * fcompare + branchCC.
15130 * - create a helper function for allocating a stack slot, taking into account
15131 * MONO_CFG_HAS_SPILLUP.
15133 * - merge the ia64 switch changes.
15134 * - optimize mono_regstate2_alloc_int/float.
15135 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15136 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15137 * parts of the tree could be separated by other instructions, killing the tree
15138 * arguments, or stores killing loads etc. Also, should we fold loads into other
15139 * instructions if the result of the load is used multiple times ?
15140 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15141 * - LAST MERGE: 108395.
15142 * - when returning vtypes in registers, generate IR and append it to the end of the
15143 * last bb instead of doing it in the epilog.
15144 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15152 - When to decompose opcodes:
15153 - earlier: this makes some optimizations hard to implement, since the low level IR
15154     no longer contains the necessary information. But it is easier to do.
15155 - later: harder to implement, enables more optimizations.
15156 - Branches inside bblocks:
15157 - created when decomposing complex opcodes.
15158 - branches to another bblock: harmless, but not tracked by the branch
15159 optimizations, so need to branch to a label at the start of the bblock.
15160 - branches to inside the same bblock: very problematic, trips up the local
15161     reg allocator. Can be fixed by splitting the current bblock, but that is a
15162 complex operation, since some local vregs can become global vregs etc.
15163 - Local/global vregs:
15164 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15165 local register allocator.
15166 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15167 structure, created by mono_create_var (). Assigned to hregs or the stack by
15168 the global register allocator.
15169 - When to do optimizations like alu->alu_imm:
15170 - earlier -> saves work later on since the IR will be smaller/simpler
15171 - later -> can work on more instructions
15172 - Handling of valuetypes:
15173 - When a vtype is pushed on the stack, a new temporary is created, an
15174 instruction computing its address (LDADDR) is emitted and pushed on
15175 the stack. Need to optimize cases when the vtype is used immediately as in
15176 argument passing, stloc etc.
15177 - Instead of the to_end stuff in the old JIT, simply call the function handling
15178 the values on the stack before emitting the last instruction of the bb.
15181 #endif /* DISABLE_JIT */