2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
28 #ifdef HAVE_SYS_TIME_H
36 #include <mono/utils/memcheck.h>
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
67 #include <mono/utils/mono-threads-coop.h>
73 #include "jit-icalls.h"
75 #include "debugger-agent.h"
76 #include "seq-points.h"
77 #include "aot-compiler.h"
78 #include "mini-llvm.h"
/* Relative cost assigned to a branch when weighing inline candidates. */
80 #define BRANCH_COST 10
/* Upper bound on the size of a method eligible for inlining.
 * NOTE(review): presumably measured in IL bytes -- confirm against inline_method (). */
81 #define INLINE_LENGTH_LIMIT 20
83 /* These have 'cfg' as an implicit argument */
84 #define INLINE_FAILURE(msg) do { \
85 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
86 inline_failure (cfg, msg); \
87 goto exception_exit; \
90 #define CHECK_CFG_EXCEPTION do {\
91 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE(method, field) do { \
95 field_access_failure ((cfg), (method), (field)); \
96 goto exception_exit; \
98 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
101 goto exception_exit; \
104 #define GSHAREDVT_FAILURE(opcode) do { \
105 if (cfg->gsharedvt) { \
106 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
107 goto exception_exit; \
110 #define OUT_OF_MEMORY_FAILURE do { \
111 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
112 mono_error_set_out_of_memory (&cfg->error, ""); \
113 goto exception_exit; \
115 #define DISABLE_AOT(cfg) do { \
116 if ((cfg)->verbose_level >= 2) \
117 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
118 (cfg)->disable_aot = TRUE; \
120 #define LOAD_ERROR do { \
121 break_on_unverified (); \
122 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
123 goto exception_exit; \
126 #define TYPE_LOAD_ERROR(klass) do { \
127 cfg->exception_ptr = klass; \
131 #define CHECK_CFG_ERROR do {\
132 if (!mono_error_ok (&cfg->error)) { \
133 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
134 goto mono_error_exit; \
138 /* Determine whenever 'ins' represents a load of the 'this' argument */
139 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
147 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
149 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
150 guchar *ip, guint real_offset, gboolean inline_always);
152 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
160 /* type loading helpers */
161 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
162 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
165 * Instruction metadata
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
/* Allocate a fresh integer vreg for the current compile. */
212 mono_alloc_ireg (MonoCompile *cfg)
214 return alloc_ireg (cfg);
/* Allocate a fresh long (64 bit) vreg. */
218 mono_alloc_lreg (MonoCompile *cfg)
220 return alloc_lreg (cfg);
/* Allocate a fresh floating point vreg. */
224 mono_alloc_freg (MonoCompile *cfg)
226 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
230 mono_alloc_preg (MonoCompile *cfg)
232 return alloc_preg (cfg);
/* Allocate a destination vreg suitable for the given eval-stack type. */
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC tracking kind (ref / managed pointer / plain int)
 * to the newly allocated register. */
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode used when copying a value of that type
 * between vregs.  Enums and generic instances are resolved to their
 * underlying/container type first; unknown types abort via g_error ().
 */
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
/* All reference types share one move kind. */
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
/* R4 moves depend on whether the backend keeps float32 values in fp regs. */
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
/* Enums move as their underlying integral type. */
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
/* Retry with the generic container class. */
330 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: dump BB's in/out edges and its instruction list to
 * stdout, prefixed with MSG.
 */
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* Predecessor edges, printed as BB<num>(<dfn>). */
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* Successor edges. */
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Finally, each instruction in the block in order. */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Pre-create the icall signatures used by helper calls emitted by this
 * file, so they need not be constructed repeatedly at compile time.
 */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook: when the 'break_on_unverified' debug option is set,
 * trap so the developer can inspect why IL failed to verify.
 * MONO_NEVER_INLINE keeps it a distinct breakpoint target.
 */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
/*
 * field_access_failure:
 *
 *   Flag CFG with a MonoError-based FieldAccessException stating that
 * FIELD is inaccessible from METHOD.  The formatted full names are
 * freed before returning.
 */
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
384 g_free (method_fname);
385 g_free (field_fname);
/*
 * inline_failure:
 *
 *   Record that inlining failed for the reason in MSG (logged when
 * verbose) and mark CFG with MONO_EXCEPTION_INLINE_FAILED.
 */
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record that gsharedvt compilation cannot handle OPCODE at FILE:LINE.
 * Stores a formatted message in cfg->exception_message (printed when
 * verbose) and flags the compile with GENERIC_SHARING_FAILED so an
 * unshared instantiation is used instead.
 */
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
415 * foo<T> (int i) { ldarg.0; box T; }
417 #define UNVERIFIED do { \
418 if (cfg->gsharedvt) { \
419 if (cfg->verbose_level > 2) \
420 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
422 goto exception_exit; \
424 break_on_unverified (); \
428 #define GET_BBLOCK(cfg,tblock,ip) do { \
429 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
431 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
432 NEW_BBLOCK (cfg, (tblock)); \
433 (tblock)->cil_code = (ip); \
434 ADD_BBLOCK (cfg, (tblock)); \
438 #if defined(TARGET_X86) || defined(TARGET_AMD64)
439 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
440 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
441 (dest)->dreg = alloc_ireg_mp ((cfg)); \
442 (dest)->sreg1 = (sr1); \
443 (dest)->sreg2 = (sr2); \
444 (dest)->inst_imm = (imm); \
445 (dest)->backend.shift_amount = (shift); \
446 MONO_ADD_INS ((cfg)->cbb, (dest)); \
450 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   Make the two operands of binary instruction INS type-compatible:
 * an r4 operand mixed with r8 is widened to r8, and (on 64 bit targets)
 * an i4 operand combined with a native int is sign-extended.  The arg
 * pointers are passed by reference so the caller sees the conversion
 * instructions.
 */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
/* ptr <op> int32: sign-extend the 32 bit operand to pointer width. */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
493 #define ADD_BINOP(op) do { \
494 MONO_INST_NEW (cfg, ins, (op)); \
496 ins->sreg1 = sp [0]->dreg; \
497 ins->sreg2 = sp [1]->dreg; \
498 type_from_op (cfg, ins, sp [0], sp [1]); \
500 /* Have to insert a widening op */ \
501 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
502 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
503 MONO_ADD_INS ((cfg)->cbb, (ins)); \
504 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
507 #define ADD_UNOP(op) do { \
508 MONO_INST_NEW (cfg, ins, (op)); \
510 ins->sreg1 = sp [0]->dreg; \
511 type_from_op (cfg, ins, sp [0], NULL); \
513 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode (cfg, ins); \
518 #define ADD_BINCOND(next_block) do { \
521 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
522 cmp->sreg1 = sp [0]->dreg; \
523 cmp->sreg2 = sp [1]->dreg; \
524 type_from_op (cfg, cmp, sp [0], sp [1]); \
526 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
527 type_from_op (cfg, ins, sp [0], sp [1]); \
528 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
529 GET_BBLOCK (cfg, tblock, target); \
530 link_bblock (cfg, cfg->cbb, tblock); \
531 ins->inst_true_bb = tblock; \
532 if ((next_block)) { \
533 link_bblock (cfg, cfg->cbb, (next_block)); \
534 ins->inst_false_bb = (next_block); \
535 start_new_bblock = 1; \
537 GET_BBLOCK (cfg, tblock, ip); \
538 link_bblock (cfg, cfg->cbb, tblock); \
539 ins->inst_false_bb = tblock; \
540 start_new_bblock = 2; \
542 if (sp != stack_start) { \
543 handle_stack_args (cfg, stack_start, sp - stack_start); \
544 CHECK_UNVERIFIABLE (cfg); \
546 MONO_ADD_INS (cfg->cbb, cmp); \
547 MONO_ADD_INS (cfg->cbb, ins); \
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * the control flow ends to after 'from'.
/*
 * Add a control-flow edge FROM -> TO, keeping FROM's out_bb list and
 * TO's in_bb list in sync.  Existing edges are not duplicated; growing a
 * list allocates a new array from the cfg mempool (no freeing needed).
 */
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
/* Trace the edge being added; entry/exit blocks have no cil_code. */
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
/* Nothing to do if TO is already a successor of FROM. */
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
/* Grow FROM's successor array by one. */
585 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
/* Same for TO's predecessor list. */
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
/* Handler ranges are checked first: the returned region id packs the
 * 1-based clause index in the upper bits with the region kind and the
 * clause flags in the low byte. */
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Then try bodies: handler regions take precedence over enclosing trys. */
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
/* Return whether OFFSET falls inside the handler of a finally or fault
 * clause of the current method. */
665 ip_in_finally_clause (MonoCompile *cfg, int offset)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
671 for (i = 0; i < header->num_clauses; ++i) {
672 clause = &header->clauses [i];
/* Only finally and fault handlers are of interest here. */
673 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
676 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * mono_find_final_block:
 *
 *   Collect, as a GList, the clauses of kind TYPE whose protected range
 * contains IP but not TARGET -- i.e. the handlers a branch from IP to
 * TARGET exits and which therefore must be executed.
 */
683 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
685 MonoMethodHeader *header = cfg->header;
686 MonoExceptionClause *clause;
690 for (i = 0; i < header->num_clauses; ++i) {
691 clause = &header->clauses [i];
692 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
693 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
694 if (clause->flags == type)
695 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer variable for REGION, cached in
 * cfg->spvars keyed by the region id. */
702 mono_create_spvar_for_region (MonoCompile *cfg, int region)
706 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
710 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
711 /* prevent it from being register allocated */
712 var->flags |= MONO_INST_VOLATILE;
714 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for IL OFFSET. */
718 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
720 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for IL OFFSET,
 * cached in cfg->exvars keyed by the offset. */
724 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
728 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
732 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
733 /* prevent it from being register allocated */
734 var->flags |= MONO_INST_VOLATILE;
736 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
742 * Returns the type used in the eval stack when @type is loaded.
743 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *
 *   Set INST->type (and ->klass) to the eval-stack type used when a
 * value of TYPE is loaded.  Enums and generic instances are resolved to
 * their underlying/container types; unknown types abort via g_error ().
 */
746 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
750 type = mini_get_underlying_type (type);
751 inst->klass = klass = mono_class_from_mono_type (type);
753 inst->type = STACK_MP;
758 switch (type->type) {
760 inst->type = STACK_INV;
768 inst->type = STACK_I4;
773 case MONO_TYPE_FNPTR:
774 inst->type = STACK_PTR;
/* All reference types load as STACK_OBJ. */
776 case MONO_TYPE_CLASS:
777 case MONO_TYPE_STRING:
778 case MONO_TYPE_OBJECT:
779 case MONO_TYPE_SZARRAY:
780 case MONO_TYPE_ARRAY:
781 inst->type = STACK_OBJ;
785 inst->type = STACK_I8;
/* R4 stack type depends on the backend (STACK_R4 vs STACK_R8). */
788 inst->type = cfg->r4_stack_type;
791 inst->type = STACK_R8;
793 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
794 if (type->data.klass->enumtype) {
795 type = mono_class_enum_basetype (type->data.klass);
799 inst->type = STACK_VTYPE;
802 case MONO_TYPE_TYPEDBYREF:
803 inst->klass = mono_defaults.typed_reference_class;
804 inst->type = STACK_VTYPE;
806 case MONO_TYPE_GENERICINST:
/* Retry with the generic container class. */
807 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
811 g_assert (cfg->gshared);
812 if (mini_is_gsharedvt_type (type)) {
813 g_assert (cfg->gsharedvt);
814 inst->type = STACK_VTYPE;
816 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
820 g_error ("unknown type 0x%02x in eval stack type", type->type);
825 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Result eval-stack type of binary numeric operations, indexed by the
 * stack types of the two operands (row: first operand, column: second).
 * STACK_INV marks invalid IL combinations; rows/columns carrying a
 * ninth entry cover STACK_R4.
 */
828 bin_num_table [STACK_MAX] [STACK_MAX] = {
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
834 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
842 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
845 /* reduce the size of this table */
/* Result types for integer-only binary operations. */
847 bin_int_table [STACK_MAX] [STACK_MAX] = {
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
850 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
851 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility: 0 = not comparable.  type_from_op () tests
 * bit 0 for some comparisons; NOTE(review): the meaning of the 2/3/4
 * entries should be confirmed against the consumers of this table. */
859 bin_comp_table [STACK_MAX] [STACK_MAX] = {
860 /* Inv i L p F & O vt r4 */
862 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
863 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
864 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
865 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
866 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
867 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
868 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
869 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
872 /* reduce the size of this table */
/* Result types for shift operations (second index: shift count type). */
874 shift_table [STACK_MAX] [STACK_MAX] = {
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
877 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
878 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
886 * Tables to map from the non-specific opcode to the matching
887 * type-specific opcode.
889 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
891 binops_op_map [STACK_MAX] = {
892 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
895 /* handles from CEE_NEG to CEE_CONV_U8 */
897 unops_op_map [STACK_MAX] = {
898 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
901 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
903 ovfops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
907 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
909 ovf2ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
913 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
915 ovf3ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
919 /* handles from CEE_BEQ to CEE_BLT_UN */
921 beqops_op_map [STACK_MAX] = {
922 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
925 /* handles from CEE_CEQ to CEE_CLT_UN */
927 ceqops_op_map [STACK_MAX] = {
928 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
932 * Sets ins->type (the type on the eval stack) according to the
933 * type of the opcode and the arguments to it.
934 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
936 * FIXME: this function sets ins->type unconditionally in some cases, but
937 * it should set it to invalid for some types (a conv.x on an object)
/* See the comment above: computes INS->type from the operand stack types
 * and specializes the generic opcode via the *_op_map offset tables. */
940 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
942 switch (ins->opcode) {
/* Binary numeric ops: result type from bin_num_table, then specialize. */
949 /* FIXME: check unverifiable args for STACK_MP */
950 ins->type = bin_num_table [src1->type] [src2->type];
951 ins->opcode += binops_op_map [ins->type];
/* Integer-only binary ops. */
958 ins->type = bin_int_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* Shift ops. */
964 ins->type = shift_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the long/r4/r8/int compare based on the operand type. */
970 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE;
973 else if (src1->type == STACK_R4)
974 ins->opcode = OP_RCOMPARE;
975 else if (src1->type == STACK_R8)
976 ins->opcode = OP_FCOMPARE;
978 ins->opcode = OP_ICOMPARE;
980 case OP_ICOMPARE_IMM:
981 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
982 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
983 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and set-on-compare opcodes. */
995 ins->opcode += beqops_op_map [src1->type];
998 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
/* Only combinations with bit 0 set are valid here. */
1005 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1006 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
1010 ins->type = neg_table [src1->type];
1011 ins->opcode += unops_op_map [ins->type];
1014 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1015 ins->type = src1->type;
1017 ins->type = STACK_INV;
1018 ins->opcode += unops_op_map [ins->type];
/* Conversions: result type is fixed by the opcode, source type selects
 * the specialized conversion. */
1024 ins->type = STACK_I4;
1025 ins->opcode += unops_op_map [src1->type];
1028 ins->type = STACK_R8;
1029 switch (src1->type) {
1032 ins->opcode = OP_ICONV_TO_R_UN;
1035 ins->opcode = OP_LCONV_TO_R_UN;
1039 case CEE_CONV_OVF_I1:
1040 case CEE_CONV_OVF_U1:
1041 case CEE_CONV_OVF_I2:
1042 case CEE_CONV_OVF_U2:
1043 case CEE_CONV_OVF_I4:
1044 case CEE_CONV_OVF_U4:
1045 ins->type = STACK_I4;
1046 ins->opcode += ovf3ops_op_map [src1->type];
1048 case CEE_CONV_OVF_I_UN:
1049 case CEE_CONV_OVF_U_UN:
1050 ins->type = STACK_PTR;
1051 ins->opcode += ovf2ops_op_map [src1->type];
1053 case CEE_CONV_OVF_I1_UN:
1054 case CEE_CONV_OVF_I2_UN:
1055 case CEE_CONV_OVF_I4_UN:
1056 case CEE_CONV_OVF_U1_UN:
1057 case CEE_CONV_OVF_U2_UN:
1058 case CEE_CONV_OVF_U4_UN:
1059 ins->type = STACK_I4;
1060 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result, source-dependent opcode. */
1063 ins->type = STACK_PTR;
1064 switch (src1->type) {
1066 ins->opcode = OP_ICONV_TO_U;
1070 #if SIZEOF_VOID_P == 8
1071 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_MOVE;
1077 ins->opcode = OP_LCONV_TO_U;
1080 ins->opcode = OP_FCONV_TO_U;
1086 ins->type = STACK_I8;
1087 ins->opcode += unops_op_map [src1->type];
1089 case CEE_CONV_OVF_I8:
1090 case CEE_CONV_OVF_U8:
1091 ins->type = STACK_I8;
1092 ins->opcode += ovf3ops_op_map [src1->type];
1094 case CEE_CONV_OVF_U8_UN:
1095 case CEE_CONV_OVF_I8_UN:
1096 ins->type = STACK_I8;
1097 ins->opcode += ovf2ops_op_map [src1->type];
1100 ins->type = cfg->r4_stack_type;
1101 ins->opcode += unops_op_map [src1->type];
1104 ins->type = STACK_R8;
1105 ins->opcode += unops_op_map [src1->type];
1108 ins->type = STACK_R8;
1112 ins->type = STACK_I4;
1113 ins->opcode += ovfops_op_map [src1->type];
1116 case CEE_CONV_OVF_I:
1117 case CEE_CONV_OVF_U:
1118 ins->type = STACK_PTR;
1119 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are invalid operands here. */
1122 case CEE_ADD_OVF_UN:
1124 case CEE_MUL_OVF_UN:
1126 case CEE_SUB_OVF_UN:
1127 ins->type = bin_num_table [src1->type] [src2->type];
1128 ins->opcode += ovfops_op_map [src1->type];
1129 if (ins->type == STACK_R8)
1130 ins->type = STACK_INV;
/* Memory loads: result type is fixed by the load width. */
1132 case OP_LOAD_MEMBASE:
1133 ins->type = STACK_PTR;
1135 case OP_LOADI1_MEMBASE:
1136 case OP_LOADU1_MEMBASE:
1137 case OP_LOADI2_MEMBASE:
1138 case OP_LOADU2_MEMBASE:
1139 case OP_LOADI4_MEMBASE:
1140 case OP_LOADU4_MEMBASE:
1141 ins->type = STACK_PTR;
1143 case OP_LOADI8_MEMBASE:
1144 ins->type = STACK_I8;
1146 case OP_LOADR4_MEMBASE:
1147 ins->type = cfg->r4_stack_type;
1149 case OP_LOADR8_MEMBASE:
1150 ins->type = STACK_R8;
1153 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers are typed as object for GC tracking purposes. */
1157 if (ins->type == STACK_MP)
1158 ins->klass = mono_defaults.object_class;
1163 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1169 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the eval-stack types of ARGS are compatible with SIG
 * (and THIS_INS for the receiver); returns a boolean-style verdict. */
1174 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1179 switch (args->type) {
/* Check each actual argument against the declared parameter type. */
1189 for (i = 0; i < sig->param_count; ++i) {
1190 switch (args [i].type) {
1194 if (!sig->params [i]->byref)
1198 if (sig->params [i]->byref)
1200 switch (sig->params [i]->type) {
1201 case MONO_TYPE_CLASS:
1202 case MONO_TYPE_STRING:
1203 case MONO_TYPE_OBJECT:
1204 case MONO_TYPE_SZARRAY:
1205 case MONO_TYPE_ARRAY:
/* Floating point arguments must match an R4/R8 (non-byref) parameter. */
1212 if (sig->params [i]->byref)
1214 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1223 /*if (!param_table [args [i].type] [sig->params [i]->type])
1231 * When we need a pointer to the current domain many times in a method, we
1232 * call mono_domain_get() once and we store the result in a local variable.
1233 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and return the local variable caching the result of
 * mono_domain_get () for the current method (see comment above). */
1235 inline static MonoInst *
1236 mono_get_domainvar (MonoCompile *cfg)
1238 if (!cfg->domainvar)
1239 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1240 return cfg->domainvar;
1244 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create and return the variable holding the GOT address; only
 * meaningful when compiling AOT on backends that need a got var. */
1248 mono_get_got_var (MonoCompile *cfg)
1250 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1252 if (!cfg->got_var) {
1253 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1255 return cfg->got_var;
/* Lazily create and return the rgctx variable; only valid under generic
 * sharing.  Marked volatile to force stack allocation. */
1259 mono_get_vtable_var (MonoCompile *cfg)
1261 g_assert (cfg->gshared);
1263 if (!cfg->rgctx_var) {
1264 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1265 /* force the var to be stack allocated */
1266 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1269 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType; aborts on unknown types. */
1273 type_from_stack_type (MonoInst *ins) {
1274 switch (ins->type) {
1275 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1276 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1277 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1278 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1279 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the klass recorded on the instruction. */
1281 return &ins->klass->this_arg;
1282 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1283 case STACK_VTYPE: return &ins->klass->byval_arg;
1285 g_error ("stack type %d to monotype not handled\n", ins->type);
1290 static G_GNUC_UNUSED int
1291 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1293 t = mono_type_get_underlying_type (t);
1305 case MONO_TYPE_FNPTR:
1307 case MONO_TYPE_CLASS:
1308 case MONO_TYPE_STRING:
1309 case MONO_TYPE_OBJECT:
1310 case MONO_TYPE_SZARRAY:
1311 case MONO_TYPE_ARRAY:
1317 return cfg->r4_stack_type;
1320 case MONO_TYPE_VALUETYPE:
1321 case MONO_TYPE_TYPEDBYREF:
1323 case MONO_TYPE_GENERICINST:
1324 if (mono_type_generic_inst_is_valuetype (t))
1330 g_assert_not_reached ();
1337 array_access_to_klass (int opcode)
1341 return mono_defaults.byte_class;
1343 return mono_defaults.uint16_class;
1346 return mono_defaults.int_class;
1349 return mono_defaults.sbyte_class;
1352 return mono_defaults.int16_class;
1355 return mono_defaults.int32_class;
1357 return mono_defaults.uint32_class;
1360 return mono_defaults.int64_class;
1363 return mono_defaults.single_class;
1366 return mono_defaults.double_class;
1367 case CEE_LDELEM_REF:
1368 case CEE_STELEM_REF:
1369 return mono_defaults.object_class;
1371 g_assert_not_reached ();
1377 * We try to share variables when possible
1380 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1385 /* inlining can result in deeper stacks */
1386 if (slot >= cfg->header->max_stack)
1387 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1389 pos = ins->type - 1 + slot * STACK_MAX;
1391 switch (ins->type) {
1398 if ((vnum = cfg->intvars [pos]))
1399 return cfg->varinfo [vnum];
1400 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1401 cfg->intvars [pos] = res->inst_c0;
1404 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1410 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1413 * Don't use this if a generic_context is set, since that means AOT can't
1414 * look up the method using just the image+token.
1415 * table == 0 means this is a reference made from a wrapper.
1417 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1418 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1419 jump_info_token->image = image;
1420 jump_info_token->token = token;
1421 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1426 * This function is called to handle items that are left on the evaluation stack
1427 * at basic block boundaries. What happens is that we save the values to local variables
1428 * and we reload them later when first entering the target basic block (with the
1429 * handle_loaded_temps () function).
1430 * A single joint point will use the same variables (stored in the array bb->out_stack or
1431 * bb->in_stack, if the basic block is before or after the joint point).
1433 * This function needs to be called _before_ emitting the last instruction of
1434 * the bb (i.e. before emitting a branch).
1435 * If the stack merge fails at a join point, cfg->unverifiable is set.
1438 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1441 MonoBasicBlock *bb = cfg->cbb;
1442 MonoBasicBlock *outb;
1443 MonoInst *inst, **locals;
1448 if (cfg->verbose_level > 3)
1449 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1450 if (!bb->out_scount) {
1451 bb->out_scount = count;
1452 //printf ("bblock %d has out:", bb->block_num);
1454 for (i = 0; i < bb->out_count; ++i) {
1455 outb = bb->out_bb [i];
1456 /* exception handlers are linked, but they should not be considered for stack args */
1457 if (outb->flags & BB_EXCEPTION_HANDLER)
1459 //printf (" %d", outb->block_num);
1460 if (outb->in_stack) {
1462 bb->out_stack = outb->in_stack;
1468 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1469 for (i = 0; i < count; ++i) {
1471 * try to reuse temps already allocated for this purpouse, if they occupy the same
1472 * stack slot and if they are of the same type.
1473 * This won't cause conflicts since if 'local' is used to
1474 * store one of the values in the in_stack of a bblock, then
1475 * the same variable will be used for the same outgoing stack
1477 * This doesn't work when inlining methods, since the bblocks
1478 * in the inlined methods do not inherit their in_stack from
1479 * the bblock they are inlined to. See bug #58863 for an
1482 if (cfg->inlined_method)
1483 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1485 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1490 for (i = 0; i < bb->out_count; ++i) {
1491 outb = bb->out_bb [i];
1492 /* exception handlers are linked, but they should not be considered for stack args */
1493 if (outb->flags & BB_EXCEPTION_HANDLER)
1495 if (outb->in_scount) {
1496 if (outb->in_scount != bb->out_scount) {
1497 cfg->unverifiable = TRUE;
1500 continue; /* check they are the same locals */
1502 outb->in_scount = count;
1503 outb->in_stack = bb->out_stack;
1506 locals = bb->out_stack;
1508 for (i = 0; i < count; ++i) {
1509 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1510 inst->cil_code = sp [i]->cil_code;
1511 sp [i] = locals [i];
1512 if (cfg->verbose_level > 3)
1513 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1517 * It is possible that the out bblocks already have in_stack assigned, and
1518 * the in_stacks differ. In this case, we will store to all the different
1525 /* Find a bblock which has a different in_stack */
1527 while (bindex < bb->out_count) {
1528 outb = bb->out_bb [bindex];
1529 /* exception handlers are linked, but they should not be considered for stack args */
1530 if (outb->flags & BB_EXCEPTION_HANDLER) {
1534 if (outb->in_stack != locals) {
1535 for (i = 0; i < count; ++i) {
1536 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1537 inst->cil_code = sp [i]->cil_code;
1538 sp [i] = locals [i];
1539 if (cfg->verbose_level > 3)
1540 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1542 locals = outb->in_stack;
1552 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1556 if (cfg->compile_aot) {
1557 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1563 ji.type = patch_type;
1564 ji.data.target = data;
1565 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1566 mono_error_assert_ok (&error);
1568 EMIT_NEW_PCONST (cfg, ins, target);
1574 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1576 int ibitmap_reg = alloc_preg (cfg);
1577 #ifdef COMPRESSED_INTERFACE_BITMAP
1579 MonoInst *res, *ins;
1580 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1581 MONO_ADD_INS (cfg->cbb, ins);
1583 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1584 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1585 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1587 int ibitmap_byte_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1591 if (cfg->compile_aot) {
1592 int iid_reg = alloc_preg (cfg);
1593 int shifted_iid_reg = alloc_preg (cfg);
1594 int ibitmap_byte_address_reg = alloc_preg (cfg);
1595 int masked_iid_reg = alloc_preg (cfg);
1596 int iid_one_bit_reg = alloc_preg (cfg);
1597 int iid_bit_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1600 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1601 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1603 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1604 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1605 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1607 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1614 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1615 * stored in "klass_reg" implements the interface "klass".
1618 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1620 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1624 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1625 * stored in "vtable_reg" implements the interface "klass".
1628 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1630 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1634 * Emit code which checks whenever the interface id of @klass is smaller than
1635 * than the value given by max_iid_reg.
1638 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1639 MonoBasicBlock *false_target)
1641 if (cfg->compile_aot) {
1642 int iid_reg = alloc_preg (cfg);
1643 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1644 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1651 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1654 /* Same as above, but obtains max_iid from a vtable */
1656 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1657 MonoBasicBlock *false_target)
1659 int max_iid_reg = alloc_preg (cfg);
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1662 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1665 /* Same as above, but obtains max_iid from a klass */
1667 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1668 MonoBasicBlock *false_target)
1670 int max_iid_reg = alloc_preg (cfg);
1672 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1673 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1677 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1679 int idepth_reg = alloc_preg (cfg);
1680 int stypes_reg = alloc_preg (cfg);
1681 int stype = alloc_preg (cfg);
1683 mono_class_setup_supertypes (klass);
1685 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1693 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1694 } else if (cfg->compile_aot) {
1695 int const_reg = alloc_preg (cfg);
1696 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1697 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1705 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1711 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1713 int intf_reg = alloc_preg (cfg);
1715 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1716 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1721 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1725 * Variant of the above that takes a register to the class, not the vtable.
1728 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1730 int intf_bit_reg = alloc_preg (cfg);
1732 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1733 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1734 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1736 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1738 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1742 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1745 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1747 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1748 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1750 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1754 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1756 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1760 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1762 if (cfg->compile_aot) {
1763 int const_reg = alloc_preg (cfg);
1764 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1765 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1773 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1776 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1779 int rank_reg = alloc_preg (cfg);
1780 int eclass_reg = alloc_preg (cfg);
1782 g_assert (!klass_inst);
1783 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1785 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1786 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1788 if (klass->cast_class == mono_defaults.object_class) {
1789 int parent_reg = alloc_preg (cfg);
1790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1791 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1792 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1793 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1794 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1795 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1796 } else if (klass->cast_class == mono_defaults.enum_class) {
1797 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1798 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1799 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1801 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1802 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1805 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1806 /* Check that the object is a vector too */
1807 int bounds_reg = alloc_preg (cfg);
1808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1809 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1810 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1813 int idepth_reg = alloc_preg (cfg);
1814 int stypes_reg = alloc_preg (cfg);
1815 int stype = alloc_preg (cfg);
1817 mono_class_setup_supertypes (klass);
1819 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1821 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1822 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1826 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1831 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1833 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1837 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1841 g_assert (val == 0);
1846 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1849 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1852 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1855 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1857 #if SIZEOF_REGISTER == 8
1859 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1865 val_reg = alloc_preg (cfg);
1867 if (SIZEOF_REGISTER == 8)
1868 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1870 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1873 /* This could be optimized further if neccesary */
1875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1882 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1913 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1920 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1921 g_assert (size < 10000);
1924 /* This could be optimized further if neccesary */
1926 cur_reg = alloc_preg (cfg);
1927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1935 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1937 cur_reg = alloc_preg (cfg);
1938 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1939 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1947 cur_reg = alloc_preg (cfg);
1948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1949 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1955 cur_reg = alloc_preg (cfg);
1956 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1957 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1963 cur_reg = alloc_preg (cfg);
1964 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1965 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1973 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1977 if (cfg->compile_aot) {
1978 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1979 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1981 ins->sreg2 = c->dreg;
1982 MONO_ADD_INS (cfg->cbb, ins);
1984 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1986 ins->inst_offset = mini_get_tls_offset (tls_key);
1987 MONO_ADD_INS (cfg->cbb, ins);
1994 * Emit IR to push the current LMF onto the LMF stack.
1997 emit_push_lmf (MonoCompile *cfg)
2000 * Emit IR to push the LMF:
2001 * lmf_addr = <lmf_addr from tls>
2002 * lmf->lmf_addr = lmf_addr
2003 * lmf->prev_lmf = *lmf_addr
2006 int lmf_reg, prev_lmf_reg;
2007 MonoInst *ins, *lmf_ins;
2012 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2013 /* Load current lmf */
2014 lmf_ins = mono_get_lmf_intrinsic (cfg);
2016 MONO_ADD_INS (cfg->cbb, lmf_ins);
2017 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2018 lmf_reg = ins->dreg;
2019 /* Save previous_lmf */
2020 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2022 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2025 * Store lmf_addr in a variable, so it can be allocated to a global register.
2027 if (!cfg->lmf_addr_var)
2028 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2031 ins = mono_get_jit_tls_intrinsic (cfg);
2033 int jit_tls_dreg = ins->dreg;
2035 MONO_ADD_INS (cfg->cbb, ins);
2036 lmf_reg = alloc_preg (cfg);
2037 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2039 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2042 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2044 MONO_ADD_INS (cfg->cbb, lmf_ins);
2047 MonoInst *args [16], *jit_tls_ins, *ins;
2049 /* Inline mono_get_lmf_addr () */
2050 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2052 /* Load mono_jit_tls_id */
2053 if (cfg->compile_aot)
2054 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2056 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2057 /* call pthread_getspecific () */
2058 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2059 /* lmf_addr = &jit_tls->lmf */
2060 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2063 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2067 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2069 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2070 lmf_reg = ins->dreg;
2072 prev_lmf_reg = alloc_preg (cfg);
2073 /* Save previous_lmf */
2074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2084 * Emit IR to pop the current LMF from the LMF stack.
2087 emit_pop_lmf (MonoCompile *cfg)
2089 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2095 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2096 lmf_reg = ins->dreg;
2098 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2099 /* Load previous_lmf */
2100 prev_lmf_reg = alloc_preg (cfg);
2101 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2103 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2106 * Emit IR to pop the LMF:
2107 * *(lmf->lmf_addr) = lmf->prev_lmf
2109 /* This could be called before emit_push_lmf () */
2110 if (!cfg->lmf_addr_var)
2111 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2112 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2114 prev_lmf_reg = alloc_preg (cfg);
2115 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2116 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2121 emit_instrumentation_call (MonoCompile *cfg, void *func)
2123 MonoInst *iargs [1];
2126 * Avoid instrumenting inlined methods since it can
2127 * distort profiling results.
2129 if (cfg->method != cfg->current_method)
2132 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2133 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2134 mono_emit_jit_icall (cfg, func, iargs);
2139 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2142 type = mini_get_underlying_type (type);
2143 switch (type->type) {
2144 case MONO_TYPE_VOID:
2145 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2152 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2156 case MONO_TYPE_FNPTR:
2157 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2158 case MONO_TYPE_CLASS:
2159 case MONO_TYPE_STRING:
2160 case MONO_TYPE_OBJECT:
2161 case MONO_TYPE_SZARRAY:
2162 case MONO_TYPE_ARRAY:
2163 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2166 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2169 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2171 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2174 case MONO_TYPE_VALUETYPE:
2175 if (type->data.klass->enumtype) {
2176 type = mono_class_enum_basetype (type->data.klass);
2179 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2180 case MONO_TYPE_TYPEDBYREF:
2181 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2182 case MONO_TYPE_GENERICINST:
2183 type = &type->data.generic_class->container_class->byval_arg;
2186 case MONO_TYPE_MVAR:
2188 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2190 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2195 //XXX this ignores if t is byref
2196 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2199 * target_type_is_incompatible:
2200 * @cfg: MonoCompile context
2202 * Check that the item @arg on the evaluation stack can be stored
2203 * in the target type (can be a local, or field, etc).
2204 * The cfg arg can be used to check if we need verification or just
2207 * Returns: non-0 value if arg can't be stored on a target.
2210 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2212 MonoType *simple_type;
2215 if (target->byref) {
2216 /* FIXME: check that the pointed to types match */
2217 if (arg->type == STACK_MP) {
2218 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2219 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2220 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2222 /* if the target is native int& or same type */
2223 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2226 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2227 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2228 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2232 if (arg->type == STACK_PTR)
2237 simple_type = mini_get_underlying_type (target);
2238 switch (simple_type->type) {
2239 case MONO_TYPE_VOID:
2247 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2251 /* STACK_MP is needed when setting pinned locals */
2252 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2257 case MONO_TYPE_FNPTR:
2259 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2260 * in native int. (#688008).
2262 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2265 case MONO_TYPE_CLASS:
2266 case MONO_TYPE_STRING:
2267 case MONO_TYPE_OBJECT:
2268 case MONO_TYPE_SZARRAY:
2269 case MONO_TYPE_ARRAY:
2270 if (arg->type != STACK_OBJ)
2272 /* FIXME: check type compatibility */
2276 if (arg->type != STACK_I8)
2280 if (arg->type != cfg->r4_stack_type)
2284 if (arg->type != STACK_R8)
2287 case MONO_TYPE_VALUETYPE:
2288 if (arg->type != STACK_VTYPE)
2290 klass = mono_class_from_mono_type (simple_type);
2291 if (klass != arg->klass)
2294 case MONO_TYPE_TYPEDBYREF:
2295 if (arg->type != STACK_VTYPE)
2297 klass = mono_class_from_mono_type (simple_type);
2298 if (klass != arg->klass)
2301 case MONO_TYPE_GENERICINST:
2302 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2303 MonoClass *target_class;
2304 if (arg->type != STACK_VTYPE)
2306 klass = mono_class_from_mono_type (simple_type);
2307 target_class = mono_class_from_mono_type (target);
2308 /* The second cases is needed when doing partial sharing */
2309 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2313 if (arg->type != STACK_OBJ)
2315 /* FIXME: check type compatibility */
2319 case MONO_TYPE_MVAR:
2320 g_assert (cfg->gshared);
2321 if (mini_type_var_is_vt (simple_type)) {
2322 if (arg->type != STACK_VTYPE)
2325 if (arg->type != STACK_OBJ)
2330 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2336 * Prepare arguments for passing to a function call.
2337 * Return a non-zero value if the arguments can't be passed to the given
2339 * The type checks are not yet complete and some conversions may need
2340 * casts on 32 or 64 bit architectures.
2342 * FIXME: implement this using target_type_is_incompatible ()
2345 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2347 MonoType *simple_type;
2351 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2355 for (i = 0; i < sig->param_count; ++i) {
2356 if (sig->params [i]->byref) {
2357 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2361 simple_type = mini_get_underlying_type (sig->params [i]);
2363 switch (simple_type->type) {
2364 case MONO_TYPE_VOID:
2373 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2379 case MONO_TYPE_FNPTR:
2380 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2383 case MONO_TYPE_CLASS:
2384 case MONO_TYPE_STRING:
2385 case MONO_TYPE_OBJECT:
2386 case MONO_TYPE_SZARRAY:
2387 case MONO_TYPE_ARRAY:
2388 if (args [i]->type != STACK_OBJ)
2393 if (args [i]->type != STACK_I8)
2397 if (args [i]->type != cfg->r4_stack_type)
2401 if (args [i]->type != STACK_R8)
2404 case MONO_TYPE_VALUETYPE:
2405 if (simple_type->data.klass->enumtype) {
2406 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2409 if (args [i]->type != STACK_VTYPE)
2412 case MONO_TYPE_TYPEDBYREF:
2413 if (args [i]->type != STACK_VTYPE)
2416 case MONO_TYPE_GENERICINST:
2417 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2420 case MONO_TYPE_MVAR:
2422 if (args [i]->type != STACK_VTYPE)
2426 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map an OP_*CALL_MEMBASE (virtual dispatch) opcode to its direct-call
 * counterpart; used when a virtual call can be statically devirtualized.
 * NOTE(review): the 'return OP_*CALL;' lines between the cases are elided
 * from this view.
 */
2434 callvirt_to_call (int opcode)
2437 case OP_CALL_MEMBASE:
2439 case OP_VOIDCALL_MEMBASE:
2441 case OP_FCALL_MEMBASE:
2443 case OP_RCALL_MEMBASE:
2445 case OP_VCALL_MEMBASE:
2447 case OP_LCALL_MEMBASE:
/* any other opcode is a caller bug */
2450 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 * Map an OP_*CALL_MEMBASE opcode to the corresponding indirect
 * call-through-register (OP_*CALL_REG) opcode.
 */
2457 callvirt_to_call_reg (int opcode)
2460 case OP_CALL_MEMBASE:
2462 case OP_VOIDCALL_MEMBASE:
2463 return OP_VOIDCALL_REG;
2464 case OP_FCALL_MEMBASE:
2465 return OP_FCALL_REG;
2466 case OP_RCALL_MEMBASE:
2467 return OP_RCALL_REG;
2468 case OP_VCALL_MEMBASE:
2469 return OP_VCALL_REG;
2470 case OP_LCALL_MEMBASE:
2471 return OP_LCALL_REG;
/* any other opcode is a caller bug */
2473 g_assert_not_reached ();
2479 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 * Arrange for the IMT/method argument to be passed to CALL, either as a
 * dedicated call arg reg (LLVM path) or in the architecture IMT register.
 * When IMT_ARG is given its dreg is copied; otherwise METHOD is materialized
 * as a runtime constant. NOTE(review): the #ifdef/branch structure separating
 * the two halves is elided from this view.
 */
2481 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2485 if (COMPILE_LLVM (cfg)) {
2487 method_reg = alloc_preg (cfg);
2488 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* no imt_arg: load the method itself as a patchable constant */
2490 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2491 method_reg = ins->dreg;
2495 call->imt_arg_reg = method_reg;
2497 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same value, delivered via MONO_ARCH_IMT_REG */
2502 method_reg = alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2505 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2506 method_reg = ins->dreg;
2509 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo patch record from MP describing a runtime
 * relocation of TYPE against TARGET at IL offset IP. Note the struct is
 * mempool-allocated and NOT zeroed; the elided lines presumably set the
 * remaining fields (type, ip) — TODO confirm against the full file.
 */
2512 static MonoJumpInfo *
2513 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2515 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2519 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 * Return the generic-context usage flags for KLASS. The guard that makes
 * this return 0 outside gshared compilation is elided from this view.
 */
2525 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2528 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 * Return the generic-context usage flags for METHOD. The guard that makes
 * this return 0 outside gshared compilation is elided from this view.
 */
2534 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2537 return mono_method_check_context_used (method);
2543 * check_method_sharing:
2545 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs: *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX (either may be NULL).
 * A vtable is passed for static / valuetype methods of shared generic
 * classes; an mrgctx is passed for methods with their own method_inst.
 */
2548 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2550 gboolean pass_vtable = FALSE;
2551 gboolean pass_mrgctx = FALSE;
/* only static or valuetype methods of generic classes need a vtable arg */
2553 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2554 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2555 gboolean sharable = FALSE;
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2561 * Pass vtable iff target method might
2562 * be shared, which means that sharing
2563 * is enabled for its class and its
2564 * context is sharable (and it's not a
/* generic method instances get an mrgctx instead — see below */
2567 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2571 if (mini_method_get_context (cmethod) &&
2572 mini_method_get_context (cmethod)->method_inst) {
/* vtable and mrgctx are mutually exclusive */
2573 g_assert (!pass_vtable);
2575 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2578 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2583 if (out_pass_vtable)
2584 *out_pass_vtable = pass_vtable;
2585 if (out_pass_mrgctx)
2586 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * set up its return value (including vtype returns via OP_OUTARG_VTRETADDR)
 * and run the arch/LLVM lowering of the outgoing arguments. The call is NOT
 * added to the current bblock; callers do that.
 * NOTE(review): this extract is elided — guards distinguishing the two
 * mini_type_is_vtype branches (e.g. a cfg->vret_addr check) are missing
 * from this view, which is why the conditions below look identical.
 */
2589 inline static MonoCallInst *
2590 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2591 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2595 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify the profiler that this frame is leaving */
2603 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2605 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2607 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2610 call->signature = sig;
2611 call->rgctx_reg = rgctx;
2612 sig_ret = mini_get_underlying_type (sig->ret);
2614 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2617 if (mini_type_is_vtype (sig_ret)) {
2618 call->vret_var = cfg->vret_addr;
2619 //g_assert_not_reached ();
2621 } else if (mini_type_is_vtype (sig_ret)) {
/* vtype return without a preallocated vret_addr: return into a new local */
2622 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2625 temp->backend.is_pinvoke = sig->pinvoke;
2628 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2629 * address of return value to increase optimization opportunities.
2630 * Before vtype decomposition, the dreg of the call ins itself represents the
2631 * fact the call modifies the return value. After decomposition, the call will
2632 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2633 * will be transformed into an LDADDR.
2635 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2636 loada->dreg = alloc_preg (cfg);
2637 loada->inst_p0 = temp;
2638 /* We reference the call too since call->dreg could change during optimization */
2639 loada->inst_p1 = call;
2640 MONO_ADD_INS (cfg->cbb, loada);
2642 call->inst.dreg = temp->dreg;
2644 call->vret_var = loada;
2645 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2646 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2648 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2649 if (COMPILE_SOFT_FLOAT (cfg)) {
2651 * If the call has a float argument, we would need to do an r8->r4 conversion using
2652 * an icall, but that cannot be done during the call sequence since it would clobber
2653 * the call registers + the stack. So we do it before emitting the call.
2655 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2657 MonoInst *in = call->args [i];
2659 if (i >= sig->hasthis)
2660 t = sig->params [i - sig->hasthis];
/* the implicit 'this' arg is pointer-sized */
2662 t = &mono_defaults.int_class->byval_arg;
2663 t = mono_type_get_underlying_type (t);
2665 if (!t->byref && t->type == MONO_TYPE_R4) {
2666 MonoInst *iargs [1];
2670 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2672 /* The result will be in an int vreg */
2673 call->args [i] = conv;
2679 call->need_unbox_trampoline = unbox_trampoline;
/* lower outgoing args for the chosen backend */
2682 if (COMPILE_LLVM (cfg))
2683 mono_llvm_emit_call (cfg, call);
2685 mono_arch_emit_call (cfg, call);
2687 mono_arch_emit_call (cfg, call);
/* track the largest outgoing-parameter area and flag the method as calling */
2690 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2691 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx value (already in RGCTX_REG) to CALL as an out-arg in
 * the architecture RGCTX register, and record its use on the cfg/call.
 */
2697 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2699 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2700 cfg->uses_rgctx_reg = TRUE;
2701 call->rgctx_reg = TRUE;
2703 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and an rgctx argument. For pinvoke wrappers compiled with
 * calling-convention checking, brackets the call with OP_GET_SP/OP_SET_SP
 * to detect and repair callee stack imbalance, raising
 * ExecutionEngineException when the SP differs after the call.
 */
2707 inline static MonoInst*
2708 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2713 gboolean check_sp = FALSE;
2715 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2716 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2718 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into a fresh vreg before arg lowering */
2723 rgctx_reg = mono_alloc_preg (cfg);
2724 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2728 if (!cfg->stack_inbalance_var)
2729 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* record SP before the call so it can be compared afterwards */
2731 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2732 ins->dreg = cfg->stack_inbalance_var->dreg;
2733 MONO_ADD_INS (cfg->cbb, ins);
2736 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2738 call->inst.sreg1 = addr->dreg;
2741 emit_imt_argument (cfg, call, NULL, imt_arg);
2743 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* read SP again after the call */
2748 sp_reg = mono_alloc_preg (cfg);
2750 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2752 MONO_ADD_INS (cfg->cbb, ins);
2754 /* Restore the stack so we don't crash when throwing the exception */
2755 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2756 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2757 MONO_ADD_INS (cfg->cbb, ins);
2759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2760 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2764 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2766 return (MonoInst*)call;
2770 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2773 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2775 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a (possibly virtual, possibly tail) call to METHOD with signature SIG
 * and arguments ARGS. THIS_INS, when non-NULL, selects virtual dispatch;
 * IMT_ARG/RGCTX_ARG are optional extra call arguments. Handles remoting
 * wrappers, devirtualization of final/non-virtual methods, delegate Invoke
 * fast paths, IMT dispatch for interfaces and vtable-slot dispatch.
 * Returns the call instruction (already added to cfg->cbb).
 *
 * FIX(review): the call_target branch used to do
 *     call->inst.flags &= !MONO_INST_HAS_METHOD;
 * '!' is logical NOT, so '!MONO_INST_HAS_METHOD' evaluates to 0 and the
 * statement cleared EVERY flag on the instruction. Replaced with the bitwise
 * complement '~MONO_INST_HAS_METHOD', which clears only that flag bit.
 */
2778 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2779 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2781 #ifndef DISABLE_REMOTING
2782 gboolean might_be_remote = FALSE;
2784 gboolean virtual_ = this_ins != NULL;
2785 gboolean enable_for_aot = TRUE;
2788 MonoInst *call_target = NULL;
2790 gboolean need_unbox_trampoline;
2793 sig = mono_method_signature (method);
/* interface calls never reach here in llvm-only mode */
2795 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2796 g_assert_not_reached ();
2799 rgctx_reg = mono_alloc_preg (cfg);
2800 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2803 if (method->string_ctor) {
2804 /* Create the real signature */
2805 /* FIXME: Cache these */
2806 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2807 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2812 context_used = mini_method_check_context_used (cfg, method);
2814 #ifndef DISABLE_REMOTING
2815 might_be_remote = this_ins && sig->hasthis &&
2816 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2817 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2819 if (might_be_remote && context_used) {
/* gshared + remoting: call through an rgctx-resolved remoting wrapper */
2822 g_assert (cfg->gshared);
2824 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2826 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2830 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2831 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2833 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2835 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2837 #ifndef DISABLE_REMOTING
2838 if (might_be_remote)
2839 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2842 call->method = method;
2843 call->inst.flags |= MONO_INST_HAS_METHOD;
2844 call->inst.inst_left = this_ins;
2845 call->tail_call = tail;
2848 int vtable_reg, slot_reg, this_reg;
2851 this_reg = this_ins->dreg;
2853 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2854 MonoInst *dummy_use;
2856 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2858 /* Make a call to delegate->invoke_impl */
2859 call->inst.inst_basereg = this_reg;
2860 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2861 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2863 /* We must emit a dummy use here because the delegate trampoline will
2864 replace the 'this' argument with the delegate target making this activation
2865 no longer a root for the delegate.
2866 This is an issue for delegates that target collectible code such as dynamic
2867 methods of GC'able assemblies.
2869 For a test case look into #667921.
2871 FIXME: a dummy use is not the best way to do it as the local register allocator
2872 will put it on a caller save register and spil it around the call.
2873 Ideally, we would either put it on a callee save register or only do the store part.
2875 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2877 return (MonoInst*)call;
2880 if ((!cfg->compile_aot || enable_for_aot) &&
2881 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2882 (MONO_METHOD_IS_FINAL (method) &&
2883 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2884 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2886 * the method is not virtual, we just need to ensure this is not null
2887 * and then we can call the method directly.
2889 #ifndef DISABLE_REMOTING
2890 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2892 * The check above ensures method is not gshared, this is needed since
2893 * gshared methods can't have wrappers.
2895 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2899 if (!method->string_ctor)
2900 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2902 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2903 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2905 * the method is virtual, but we can statically dispatch since either
2906 * it's class or the method itself are sealed.
2907 * But first we need to ensure it's not a null reference.
2909 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2911 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2912 } else if (call_target) {
2913 vtable_reg = alloc_preg (cfg);
2914 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2916 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2917 call->inst.sreg1 = call_target->dreg;
/* clear only HAS_METHOD: was 'flags &= !MONO_INST_HAS_METHOD', which
 * cleared all flags since !nonzero == 0 */
2918 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2920 vtable_reg = alloc_preg (cfg);
2921 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2922 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2923 guint32 imt_slot = mono_method_get_imt_slot (method);
2924 emit_imt_argument (cfg, call, call->method, imt_arg);
2925 slot_reg = vtable_reg;
/* IMT entries live at negative offsets from the vtable pointer */
2926 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2928 slot_reg = vtable_reg;
2929 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2930 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2932 g_assert (mono_method_signature (method)->generic_param_count);
2933 emit_imt_argument (cfg, call, call->method, imt_arg);
2937 call->inst.sreg1 = slot_reg;
2938 call->inst.inst_offset = offset;
2939 call->is_virtual = TRUE;
2943 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2946 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2948 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: emit a non-tail call to METHOD using its own
 * signature, with no IMT or rgctx argument.
 */
2952 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2954 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * The elided lines presumably store FUNC on the call — TODO confirm
 * against the full file.
 */
2958 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2965 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2968 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2970 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall identified by its function
 * address FUNC, going through the icall's exception-handling wrapper.
 */
2974 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2976 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2980 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2984 * mono_emit_abs_call:
2986 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the "address"; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can map it back to the
 * real target at code-emission time.
 */
2988 inline static MonoInst*
2989 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2990 MonoMethodSignature *sig, MonoInst **args)
2992 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2996 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* lazily create the patch table; keyed by pointer identity */
2999 if (cfg->abs_patches == NULL)
3000 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3001 g_hash_table_insert (cfg->abs_patches, ji, ji);
3002 ins = mono_emit_native_call (cfg, ji, sig, args);
3003 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 * Return a copy of SIG extended with one trailing pointer-sized parameter,
 * used for calls that pass an extra rgctx/hidden argument.
 * NOTE(review): allocated with g_malloc and apparently never freed (see the
 * original FIXME) — the returned signature leaks; confirm ownership with
 * callers before changing.
 */
3007 static MonoMethodSignature*
3008 sig_to_rgctx_sig (MonoMethodSignature *sig)
3010 // FIXME: memory allocation
3011 MonoMethodSignature *res;
3014 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
3015 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
3016 res->param_count = sig->param_count + 1;
3017 for (i = 0; i < sig->param_count; ++i)
3018 res->params [i] = sig->params [i];
/* the extra arg is typed as an IntPtr-like 'this' pointer */
3019 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3023 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *
 * Build an argument array from ORIG_ARGS plus the value in ARG_REG appended
 * as a trailing parameter, then emit an indirect call through CALL_TARGET
 * using the extended signature from sig_to_rgctx_sig ().
 */
3025 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3027 MonoMethodSignature *csig;
3028 MonoInst *args_buf [16];
3030 int i, pindex, tmp_reg;
3032 /* Make a call with an rgctx/extra arg */
/* small calls use the stack buffer; larger ones go to the mempool */
3033 if (fsig->param_count + 2 < 16)
3036 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3039 args [pindex ++] = orig_args [0];
3040 for (i = 0; i < fsig->param_count; ++i)
3041 args [pindex ++] = orig_args [fsig->hasthis + i];
/* append the extra argument */
3042 tmp_reg = alloc_preg (cfg);
3043 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3044 csig = sig_to_rgctx_sig (fsig);
3045 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3048 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *
 * llvm-only mode represents callable addresses as <addr, arg> descriptor
 * pairs: load the code address and its bound extra argument, then call
 * through emit_extra_arg_calli ().
 */
3050 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3052 int addr_reg, arg_reg;
3053 MonoInst *call_target;
3055 g_assert (cfg->llvm_only);
3058 * addr points to a <addr, arg> pair, load both of them, and
3059 * make a call to addr, passing arg as an extra arg.
3061 addr_reg = alloc_preg (cfg);
3062 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3063 arg_reg = alloc_preg (cfg);
/* the bound argument is the second pointer in the descriptor */
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3066 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 * Whether icalls may be invoked directly, skipping their wrappers.
 * Disabled under mixed LLVM compilation, when single-stepping/seq-points
 * are active, or when explicitly turned off on the cfg.
 * NOTE(review): the return statements are elided from this view.
 */
3070 direct_icalls_enabled (MonoCompile *cfg)
3072 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3074 if (cfg->compile_llvm && !cfg->llvm_only)
3077 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 * Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, inline the (lazily created) wrapper so the
 * C function is called without trampoline overhead; otherwise fall back to
 * a normal call through the wrapper.
 */
3083 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3086 * Call the jit icall without a wrapper if possible.
3087 * The wrapper is needed for the following reasons:
3088 * - to handle exceptions thrown using mono_raise_exceptions () from the
3089 * icall function. The EH code needs the lmf frame pushed by the
3090 * wrapper to be able to unwind back to managed code.
3091 * - to be able to do stack walks for asynchronously suspended
3092 * threads when debugging.
3094 if (info->no_raise && direct_icalls_enabled (cfg)) {
3098 if (!info->wrapper_method) {
/* create the wrapper once; published below behind a barrier */
3099 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3100 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* make the wrapper visible to other threads before use */
3102 mono_memory_barrier ();
3106 * Inline the wrapper method, which is basically a call to the C icall, and
3107 * an exception check.
3109 costs = inline_method (cfg, info->wrapper_method, NULL,
3110 args, NULL, il_offset, TRUE);
3111 g_assert (costs > 0);
3112 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3116 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 * Widen a call's small-integer return value to full register width.
 * Needed for pinvoke (and LLVM) returns, since native code may leave the
 * upper bits of sub-word integers uninitialized.
 */
3121 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3123 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3124 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3128 * Native code might return non register sized integers
3129 * without initializing the upper bits.
/* pick the sign/zero-extension matching the return's load width */
3131 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3132 case OP_LOADI1_MEMBASE:
3133 widen_op = OP_ICONV_TO_I1;
3135 case OP_LOADU1_MEMBASE:
3136 widen_op = OP_ICONV_TO_U1;
3138 case OP_LOADI2_MEMBASE:
3139 widen_op = OP_ICONV_TO_I2;
3141 case OP_LOADU2_MEMBASE:
3142 widen_op = OP_ICONV_TO_U2;
3148 if (widen_op != -1) {
3149 int dreg = alloc_preg (cfg);
3152 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3153 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 * Emit IR that throws a MethodAccessException at runtime, identifying the
 * calling METHOD and the inaccessible CIL_METHOD via rgctx lookups.
 */
3164 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3166 MonoInst *args [16];
3168 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
3169 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
3171 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 * Return (and cache) corlib's internal String.memcpy(dest, src, size)
 * helper; aborts if running against a corlib that lacks it.
 */
3175 get_memcpy_method (void)
3177 static MonoMethod *memcpy_method = NULL;
3178 if (!memcpy_method) {
3179 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3181 g_error ("Old corlib found. Install a new one");
3183 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively fill *WB_BITMAP with one bit per pointer-sized slot of KLASS
 * (at byte OFFSET within the outer object) that holds a managed reference,
 * so the caller can issue write barriers only for those slots.
 */
3187 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3189 MonoClassField *field;
3190 gpointer iter = NULL;
3192 while ((field = mono_class_get_fields (klass, &iter))) {
3195 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) object header; strip it */
3197 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3198 if (mini_type_is_reference (mono_field_get_type (field))) {
/* reference fields must be pointer-aligned for the bitmap to be valid */
3199 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3200 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3202 MonoClass *field_class = mono_class_from_mono_type (field->type);
3203 if (field_class->has_references)
/* nested struct with references: recurse at its offset */
3204 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for storing VALUE through PTR. Chooses, in order:
 * the backend's OP_CARD_TABLE_WBARRIER, an inline card-table mark, or a
 * call to the GC's write-barrier method. No-op when barriers are disabled.
 */
3210 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3212 int card_table_shift_bits;
3213 gpointer card_table_mask;
3215 MonoInst *dummy_use;
3216 int nursery_shift_bits;
3217 size_t nursery_size;
3219 if (!cfg->gen_write_barriers)
3222 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3224 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3226 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3229 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3230 wbarrier->sreg1 = ptr->dreg;
3231 wbarrier->sreg2 = value->dreg;
3232 MONO_ADD_INS (cfg->cbb, wbarrier);
3233 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3234 int offset_reg = alloc_preg (cfg);
/* compute the card index for ptr */
3238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3239 if (card_table_mask)
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3242 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3243 * IMM's larger than 32bits.
3245 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3246 card_reg = ins->dreg;
3248 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* mark the card dirty */
3249 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3251 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3252 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3255 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an unrolled, write-barrier-aware copy of a KLASS-typed value
 * (SIZE bytes, ALIGN alignment) from iargs[1] to iargs[0]. Falls back to
 * the bitmap-based icall for larger copies; the elided early lines return
 * failure for cases this helper cannot handle (small alignment, huge size).
 */
3259 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3261 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3262 unsigned need_wb = 0;
3267 /*types with references can't have alignment smaller than sizeof(void*) */
3268 if (align < SIZEOF_VOID_P)
3271 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3272 if (size > 32 * SIZEOF_VOID_P)
3275 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3277 /* We don't unroll more than 5 stores to avoid code bloat. */
3278 if (size > 5 * SIZEOF_VOID_P) {
3279 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3280 size += (SIZEOF_VOID_P - 1);
3281 size &= ~(SIZEOF_VOID_P - 1);
3283 EMIT_NEW_ICONST (cfg, iargs [2], size);
3284 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3285 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3289 destreg = iargs [0]->dreg;
3290 srcreg = iargs [1]->dreg;
3293 dest_ptr_reg = alloc_preg (cfg);
3294 tmp_reg = alloc_preg (cfg);
3297 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* unrolled pointer-sized copy loop; barrier only for slots in need_wb */
3299 while (size >= SIZEOF_VOID_P) {
3300 MonoInst *load_inst;
3301 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3302 load_inst->dreg = tmp_reg;
3303 load_inst->inst_basereg = srcreg;
3304 load_inst->inst_offset = offset;
3305 MONO_ADD_INS (cfg->cbb, load_inst);
3307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3310 emit_write_barrier (cfg, iargs [0], load_inst);
3312 offset += SIZEOF_VOID_P;
3313 size -= SIZEOF_VOID_P;
3316 /*tmp += sizeof (void*)*/
3317 if (size >= SIZEOF_VOID_P) {
3318 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3319 MONO_ADD_INS (cfg->cbb, iargs [0]);
3323 /* Those cannot be references since size < sizeof (void*) */
/* copy the sub-pointer-sized tail in 4/2/1 byte chunks, no barriers */
3325 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3326 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3339 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3349 * Emit code to copy a valuetype of type @klass whose address is stored in
3350 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native (marshalled) layout/size. Handles gsharedvt types
 * via runtime size/memcpy lookups, uses write-barrier-aware copies when the
 * type contains references, and falls back to inline memcpy or corlib's
 * String.memcpy helper.
 */
3353 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3355 MonoInst *iargs [4];
3358 MonoMethod *memcpy_method;
3359 MonoInst *size_ins = NULL;
3360 MonoInst *memcpy_ins = NULL;
3364 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3367 * This check breaks with spilled vars... need to handle it during verification anyway.
3368 * g_assert (klass && klass == src->klass && klass == dest->klass);
3371 if (mini_is_gsharedvt_klass (klass)) {
/* size and memcpy routine are only known at runtime for gsharedvt */
3373 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3374 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3378 n = mono_class_native_size (klass, &align);
3380 n = mono_class_value_size (klass, &align);
3382 /* if native is true there should be no references in the struct */
3383 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3384 /* Avoid barriers when storing to the stack */
3385 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3386 (dest->opcode == OP_LDADDR))) {
3392 context_used = mini_class_check_context_used (cfg, klass);
3394 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3395 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3397 } else if (context_used) {
3398 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3400 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3401 if (!cfg->compile_aot)
3402 mono_class_compute_gc_descriptor (klass);
3406 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3408 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path: plain memory copy */
3413 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3414 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3415 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3420 iargs [2] = size_ins;
3422 EMIT_NEW_ICONST (cfg, iargs [2], n);
3424 memcpy_method = get_memcpy_method ();
3426 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3428 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (and cache) corlib's internal String.memset(ptr, val, size)
 * helper; aborts if running against a corlib that lacks it.
 */
3433 get_memset_method (void)
3435 static MonoMethod *memset_method = NULL;
3436 if (!memset_method) {
3437 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3439 g_error ("Old corlib found. Install a new one");
3441 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit IR that zero-initializes a KLASS-typed value at DEST->dreg
 * (CIL initobj). gsharedvt types use a runtime-resolved bzero helper;
 * small types get an inline memset; otherwise corlib's String.memset.
 */
3445 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3447 MonoInst *iargs [3];
3450 MonoMethod *memset_method;
3451 MonoInst *size_ins = NULL;
3452 MonoInst *bzero_ins = NULL;
3453 static MonoMethod *bzero_method;
3455 /* FIXME: Optimize this for the case when dest is an LDADDR */
3456 mono_class_init (klass);
3457 if (mini_is_gsharedvt_klass (klass)) {
/* size and bzero routine are only known at runtime for gsharedvt */
3458 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3459 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3461 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3462 g_assert (bzero_method);
3464 iargs [1] = size_ins;
3465 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3469 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3471 n = mono_class_value_size (klass, &align);
/* small values: inline memset, no call */
3473 if (n <= sizeof (gpointer) * 8) {
3474 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3477 memset_method = get_memset_method ();
3479 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3480 EMIT_NEW_ICONST (cfg, iargs [2], n);
3481 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3488 * Emit IR to return either the this pointer for instance method,
3489 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *
 * Produce the instruction holding the runtime generic context source for
 * METHOD given CONTEXT_USED: the 'this' object (instance methods), the
 * mrgctx variable (generic methods), or the vtable variable (static /
 * valuetype methods) — extracting the vtable from the mrgctx when needed.
 */
3492 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3494 MonoInst *this_ins = NULL;
3496 g_assert (cfg->gshared);
/* instance methods of reference types: 'this' carries the context */
3498 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3499 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3500 !method->klass->valuetype)
3501 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3503 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3504 MonoInst *mrgctx_loc, *mrgctx_var;
3506 g_assert (!this_ins);
3507 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3509 mrgctx_loc = mono_get_vtable_var (cfg);
3510 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3513 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3514 MonoInst *vtable_loc, *vtable_var;
3516 g_assert (!this_ins);
3518 vtable_loc = mono_get_vtable_var (cfg);
3519 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3521 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* the var holds an mrgctx; dereference to get the class vtable */
3522 MonoInst *mrgctx_var = vtable_var;
3525 vtable_reg = alloc_preg (cfg);
3526 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3527 vtable_var->type = STACK_PTR;
/* fall-through case (elided): load the vtable from 'this' */
3535 vtable_reg = alloc_preg (cfg);
3536 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) an rgctx-entry patch descriptor: METHOD whose context
 * holds the slot, whether the lookup goes through an mrgctx, the wrapped
 * patch (PATCH_TYPE/PATCH_DATA) and the kind of info stored (INFO_TYPE).
 */
3541 static MonoJumpInfoRgctxEntry *
3542 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3544 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3545 res->method = method;
3546 res->in_mrgctx = in_mrgctx;
3547 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3548 res->data->type = patch_type;
3549 res->data->data.target = patch_data;
3550 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the value of rgctx slot ENTRY from RGCTX
 * (a vtable or mrgctx pointer already on a vreg), falling back to a JIT
 * icall (mono_fill_method_rgctx / mono_fill_class_rgctx) when the slot is
 * not yet filled. NOTE(review): the original-line numbering skips lines, so
 * branch/else lines and some braces are missing from this extract.
 */
3555 static inline MonoInst*
3556 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3558 MonoInst *args [16];
/* AOT path: the slot index is only known at load time, so always call the
 * fill icall with the entry encoded as an AOT constant. */
3561 // FIXME: No fastpath since the slot is not a compile time constant
3563 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3564 if (entry->in_mrgctx)
3565 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3567 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3571 * FIXME: This can be called during decompose, which is a problem since it creates
3573 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3575 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3577 MonoBasicBlock *is_null_bb, *end_bb;
3578 MonoInst *res, *ins, *call;
/* Resolve the slot to a (depth, index) pair in the rgctx array chain. */
3581 slot = mini_get_rgctx_entry_slot (entry);
3583 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3584 index = MONO_RGCTX_SLOT_INDEX (slot);
3586 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3587 for (depth = 0; ; ++depth) {
3588 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3590 if (index < size - 1)
3595 NEW_BBLOCK (cfg, end_bb);
3596 NEW_BBLOCK (cfg, is_null_bb);
3599 rgctx_reg = rgctx->dreg;
3601 rgctx_reg = alloc_preg (cfg);
/* Non-mrgctx case: load the runtime generic context out of the vtable. */
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3604 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3605 NEW_BBLOCK (cfg, is_null_bb);
3607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3608 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of the linked rgctx arrays; any null link means the
 * table hasn't been allocated yet and we must take the slowpath. */
3611 for (i = 0; i < depth; ++i) {
3612 int array_reg = alloc_preg (cfg);
3614 /* load ptr to next array */
3615 if (mrgctx && i == 0)
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3619 rgctx_reg = array_reg;
3620 /* is the ptr null? */
3621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3622 /* if yes, jump to actual trampoline */
3623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath: load the slot value (slot 0 of each array is the link, hence
 * the +1) and check it is already initialized. */
3627 val_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3629 /* is the slot null? */
3630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3631 /* if yes, jump to actual trampoline */
3632 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3635 res_reg = alloc_preg (cfg);
3636 MONO_INST_NEW (cfg, ins, OP_MOVE);
3637 ins->dreg = res_reg;
3638 ins->sreg1 = val_reg;
3639 MONO_ADD_INS (cfg->cbb, ins);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: call the fill icall, which computes and caches the value. */
3644 MONO_START_BB (cfg, is_null_bb);
3646 EMIT_NEW_ICONST (cfg, args [1], index);
3648 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3650 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3651 MONO_INST_NEW (cfg, ins, OP_MOVE);
3652 ins->dreg = res_reg;
3653 ins->sreg1 = call->dreg;
3654 MONO_ADD_INS (cfg->cbb, ins);
3655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Both paths converge here with the value in res_reg. */
3657 MONO_START_BB (cfg, end_bb);
/*
 * emit_rgctx_fetch:
 */
3666 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Either inline the fetch (see emit_rgctx_fetch_inline) or emit a call to
 * the lazy-fetch trampoline via an abs call.
 * NOTE(review): the condition selecting between the two returns (presumably
 * a cfg->llvm_only check) is on a line dropped from this extract.
 */
3669 static inline MonoInst*
3670 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3673 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3675 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load info of type RGCTX_TYPE about KLASS (e.g. its vtable)
 * from the runtime generic context of the current method.
 */
3679 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3680 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3682 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3683 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3685 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load info of type RGCTX_TYPE about signature SIG from the
 * runtime generic context. Same pattern as emit_get_rgctx_klass, but keyed
 * on a MONO_PATCH_INFO_SIGNATURE patch.
 */
3689 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3690 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3692 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3693 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3695 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load gsharedvt call info for the (SIG, CMETHOD) pair from the
 * runtime generic context. The pair is packaged in a mempool-allocated
 * MonoJumpInfoGSharedVtCall used as the patch data.
 */
3699 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3700 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3702 MonoJumpInfoGSharedVtCall *call_info;
3703 MonoJumpInfoRgctxEntry *entry;
3706 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3707 call_info->sig = sig;
3708 call_info->method = cmethod;
3710 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3711 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
3717 * emit_get_rgctx_virt_method:
3719 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Emits IR to fetch the (KLASS, VIRT_METHOD) resolution from the runtime
 * generic context, keyed on a MONO_PATCH_INFO_VIRT_METHOD patch.
 */
3722 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3723 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3725 MonoJumpInfoVirtMethod *info;
3726 MonoJumpInfoRgctxEntry *entry;
3729 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3730 info->klass = klass;
3731 info->method = virt_method;
3733 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3734 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3736 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MonoGSharedVtMethodRuntimeInfo for CMETHOD (described
 * by INFO) from the runtime generic context. Always fetches the
 * MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO slot.
 */
3740 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3741 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3743 MonoJumpInfoRgctxEntry *entry;
3746 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3747 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
3753 * emit_get_rgctx_method:
3755 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3756 * normal constants, else emit a load from the rgctx.
/*
 * In the non-shared (context_used == 0) case only METHOD and METHOD_RGCTX
 * info types are supported; anything else asserts.
 */
3759 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3760 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3762 if (!context_used) {
3765 switch (rgctx_type) {
3766 case MONO_RGCTX_INFO_METHOD:
3767 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3769 case MONO_RGCTX_INFO_METHOD_RGCTX:
3770 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3773 g_assert_not_reached ();
/* Shared case: go through the rgctx with a METHODCONST patch. */
3776 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3779 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load info of type RGCTX_TYPE about FIELD from the runtime
 * generic context, keyed on a MONO_PATCH_INFO_FIELD patch.
 */
3784 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3785 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3787 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3788 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3790 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template table, adding it if not already present.
 * Entries of type MONO_RGCTX_INFO_LOCAL_OFFSET are never deduplicated.
 * The table grows by doubling (starting at 16) out of the cfg mempool;
 * the old array is intentionally leaked into the mempool.
 */
3794 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3796 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3797 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing entry with the same (type, data) pair if possible. */
3802 for (i = 0; i < info->num_entries; ++i) {
3803 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3805 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full. */
3809 if (info->num_entries == info->count_entries) {
3810 MonoRuntimeGenericContextInfoTemplate *new_entries;
3811 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3813 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3815 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3816 info->entries = new_entries;
3817 info->count_entries = new_count_entries;
3820 idx = info->num_entries;
3821 template_ = &info->entries [idx];
3822 template_->info_type = rgctx_type;
3823 template_->data = data;
3825 info->num_entries ++;
3831 * emit_get_gsharedvt_info:
3833 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * Looks up/creates the slot for (DATA, RGCTX_TYPE) and emits a single load
 * from cfg->gsharedvt_info_var at the slot's offset in the entries array.
 */
3836 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3841 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3842 /* Load info->entries [idx] */
3843 dreg = alloc_preg (cfg);
3844 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type. */
3850 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3852 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR to run the .cctor / type initializer of KLASS if it has not run
 * yet. The vtable argument is either fetched from the rgctx (shared code)
 * or emitted as a constant. The fast inline path tests the `initialized`
 * bit in the vtable and only calls mono_generic_class_init when clear.
 */
3856 * On return the caller must check @klass for load errors.
3859 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3861 MonoInst *vtable_arg;
3864 context_used = mini_class_check_context_used (cfg, klass);
3867 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3868 klass, MONO_RGCTX_INFO_VTABLE);
3870 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3874 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Backend opcode variant: hides the call inside the opcode so no regs are
 * clobbered and basic blocks are preserved. */
3877 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3881 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3882 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3884 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3885 ins->sreg1 = vtable_arg->dreg;
3886 MONO_ADD_INS (cfg->cbb, ins);
/* IR variant: load the initialized bitfield, test the bit, branch around
 * the icall when already initialized. byte_offset/bitmask are computed once
 * and cached in statics. */
3888 static int byte_offset = -1;
3889 static guint8 bitmask;
3890 int bits_reg, inited_reg;
3891 MonoBasicBlock *inited_bb;
3892 MonoInst *args [16];
3894 if (byte_offset < 0)
3895 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3897 bits_reg = alloc_ireg (cfg);
3898 inited_reg = alloc_ireg (cfg);
3900 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3903 NEW_BBLOCK (cfg, inited_bb);
3905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3908 args [0] = vtable_arg;
3909 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3911 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IP (an offset into the current method's
 * IL) when sequence-point generation is enabled and we are compiling METHOD
 * itself (not an inlined callee). NONEMPTY_STACK flags the point as having
 * values on the IL stack.
 */
3916 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3920 if (cfg->gen_seq_points && cfg->method == method) {
3921 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3923 ins->flags |= MONO_INST_NONEMPTY_STACK;
3924 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that stores the source class
 * (read from OBJ_REG's vtable) and target KLASS into the JIT TLS data
 * (class_cast_from / class_cast_to), so a failing cast can report both
 * types. NULL_CHECK skips the bookkeeping for a null object. No-op when
 * better_cast_details is off.
 */
3929 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3931 if (mini_get_debug_options ()->better_cast_details) {
3932 int vtable_reg = alloc_preg (cfg);
3933 int klass_reg = alloc_preg (cfg);
3934 MonoBasicBlock *is_null_bb = NULL;
3936 int to_klass_reg, context_used;
3939 NEW_BBLOCK (cfg, is_null_bb);
3941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3945 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is not available on all platforms. */
3947 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3951 MONO_ADD_INS (cfg->cbb, tls_get);
3952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3955 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The target class is an rgctx fetch in shared code, a constant otherwise. */
3957 context_used = mini_class_check_context_used (cfg, klass);
3959 MonoInst *class_ins;
3961 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3962 to_klass_reg = class_ins->dreg;
3964 to_klass_reg = alloc_preg (cfg);
3965 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3970 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-failure bookkeeping written by save_cast_details once the
 * cast is known to have succeeded. Only the class_cast_from field needs to
 * be zeroed. No-op unless --debug=casts is enabled.
 */
3975 reset_cast_details (MonoCompile *cfg)
3977 /* Reset the variables holding the cast details */
3978 if (mini_get_debug_options ()->better_cast_details) {
3979 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3981 MONO_ADD_INS (cfg->cbb, tls_get);
3982 /* It is enough to reset the from field */
3983 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that throws ArrayTypeMismatchException unless OBJ's dynamic type
 * is exactly ARRAY_CLASS. Compares classes under MONO_OPT_SHARED, vtables
 * from the rgctx in shared code, and a vtable constant/immediate otherwise
 * (constant form for AOT, raw pointer immediate for JIT).
 */
3988 * On return the caller must check @array_class for load errors
3991 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3993 int vtable_reg = alloc_preg (cfg);
3996 context_used = mini_class_check_context_used (cfg, array_class);
3998 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the null check on OBJ. */
4000 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4002 if (cfg->opt & MONO_OPT_SHARED) {
4003 int class_reg = alloc_preg (cfg);
4006 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4007 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
4008 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
4009 } else if (context_used) {
4010 MonoInst *vtable_ins;
4012 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
4013 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
4015 if (cfg->compile_aot) {
4019 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4021 vt_reg = alloc_preg (cfg);
4022 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4023 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4026 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4034 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Unbox VAL as Nullable<T> by calling the class's generated "Unbox" method.
 * In shared code the method address comes from the rgctx and an indirect
 * call is made (calli, or llvmonly calli); otherwise a direct call is
 * emitted, passing the vtable when the method requires it.
 */
4038 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4039 * generic code is generated.
4042 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4044 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4047 MonoInst *rgctx, *addr;
4049 /* FIXME: What if the class is shared? We might not
4050 have to get the address of the method from the
4052 addr = emit_get_rgctx_method (cfg, context_used, method,
4053 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4054 if (cfg->llvm_only) {
4055 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
4056 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4058 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4060 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call; pass the vtable if sharing checks demand it. */
4063 gboolean pass_vtable, pass_mrgctx;
4064 MonoInst *rgctx_arg = NULL;
4066 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4067 g_assert (!pass_mrgctx);
4070 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4073 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4076 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in SP[0] to a value of type KLASS, returning
 * an instruction holding the address of the unboxed data (obj + sizeof
 * (MonoObject), STACK_MP). Emits the type checks (rank must be 0, element
 * class must match) throwing InvalidCastException on mismatch.
 */
4081 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4085 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4086 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4087 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4088 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4090 obj_reg = sp [0]->dreg;
4091 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4092 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4094 /* FIXME: generics */
4095 g_assert (klass->rank == 0);
/* An array object can never unbox to a value type. */
4098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4099 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code compares against the rgctx-fetched element class; non-shared
 * code uses mini_emit_class_check with cast-detail bookkeeping. */
4105 MonoInst *element_class;
4107 /* This assertion is from the unboxcast insn */
4108 g_assert (klass->rank == 0);
4110 element_class = emit_get_rgctx_klass (cfg, context_used,
4111 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4113 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4114 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4116 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4117 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4118 reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, where the value data lives. */
4121 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4122 MONO_ADD_INS (cfg->cbb, add);
4123 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete representation
 * is only known at runtime. Branches on the runtime box-type info
 * (ref / nullable / vtype): vtypes yield obj + sizeof (MonoObject),
 * refs are spilled to a temp whose address is used, nullables go through
 * the runtime Nullable unbox helper. Returns a load of the value from
 * the resulting address.
 */
4130 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4132 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4133 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4137 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* castclass-with-unbox check against the runtime class. */
4143 args [1] = klass_inst;
4146 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4148 NEW_BBLOCK (cfg, is_ref_bb);
4149 NEW_BBLOCK (cfg, is_nullable_bb);
4150 NEW_BBLOCK (cfg, end_bb);
4151 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4158 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4159 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fallthrough: address of the data right after the object header. */
4163 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4164 MONO_ADD_INS (cfg->cbb, addr);
4166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4169 MONO_START_BB (cfg, is_ref_bb);
4171 /* Save the ref to a temporary */
4172 dreg = alloc_ireg (cfg);
4173 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4174 addr->dreg = addr_reg;
4175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4179 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the runtime-provided unbox helper through a
 * hand-built one-argument signature (object -> KLASS). */
4182 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4183 MonoInst *unbox_call;
4184 MonoMethodSignature *unbox_sig;
4186 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4187 unbox_sig->ret = &klass->byval_arg;
4188 unbox_sig->param_count = 1;
4189 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4192 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4194 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4196 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4197 addr->dreg = addr_reg;
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4203 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken path produced. */
4206 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4212 * Returns NULL and set the cfg exception on error.
4215 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4217 MonoInst *iargs [2];
4222 MonoRgctxInfoType rgctx_info;
4223 MonoInst *iargs [2];
4224 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4226 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4228 if (cfg->opt & MONO_OPT_SHARED)
4229 rgctx_info = MONO_RGCTX_INFO_KLASS;
4231 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4232 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4234 if (cfg->opt & MONO_OPT_SHARED) {
4235 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4237 alloc_ftn = ves_icall_object_new;
4240 alloc_ftn = ves_icall_object_new_specific;
4243 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4244 if (known_instance_size) {
4245 int size = mono_class_instance_size (klass);
4246 if (size < sizeof (MonoObject))
4247 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4249 EMIT_NEW_ICONST (cfg, iargs [1], size);
4251 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4254 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4257 if (cfg->opt & MONO_OPT_SHARED) {
4258 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4259 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4261 alloc_ftn = ves_icall_object_new;
4262 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4263 /* This happens often in argument checking code, eg. throw new FooException... */
4264 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4265 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4266 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4268 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4269 MonoMethod *managed_alloc = NULL;
4273 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4274 cfg->exception_ptr = klass;
4278 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4280 if (managed_alloc) {
4281 int size = mono_class_instance_size (klass);
4282 if (size < sizeof (MonoObject))
4283 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4285 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4286 EMIT_NEW_ICONST (cfg, iargs [1], size);
4287 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4289 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4291 guint32 lw = vtable->klass->instance_size;
4292 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4293 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4294 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4297 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4301 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4305 * Returns NULL and set the cfg exception on error.
4308 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4310 MonoInst *alloc, *ins;
4312 if (mono_class_is_nullable (klass)) {
4313 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4316 if (cfg->llvm_only && cfg->gsharedvt) {
4317 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4318 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4319 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4321 /* FIXME: What if the class is shared? We might not
4322 have to get the method address from the RGCTX. */
4323 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4324 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4325 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4327 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4330 gboolean pass_vtable, pass_mrgctx;
4331 MonoInst *rgctx_arg = NULL;
4333 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4334 g_assert (!pass_mrgctx);
4337 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4340 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4343 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4347 if (mini_is_gsharedvt_klass (klass)) {
4348 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4349 MonoInst *res, *is_ref, *src_var, *addr;
4352 dreg = alloc_ireg (cfg);
4354 NEW_BBLOCK (cfg, is_ref_bb);
4355 NEW_BBLOCK (cfg, is_nullable_bb);
4356 NEW_BBLOCK (cfg, end_bb);
4357 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4365 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4368 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4369 ins->opcode = OP_STOREV_MEMBASE;
4371 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4372 res->type = STACK_OBJ;
4374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4377 MONO_START_BB (cfg, is_ref_bb);
4379 /* val is a vtype, so has to load the value manually */
4380 src_var = get_vreg_to_inst (cfg, val->dreg);
4382 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4383 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4388 MONO_START_BB (cfg, is_nullable_bb);
4391 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4392 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4394 MonoMethodSignature *box_sig;
4397 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4398 * construct that method at JIT time, so have to do things by hand.
4400 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4401 box_sig->ret = &mono_defaults.object_class->byval_arg;
4402 box_sig->param_count = 1;
4403 box_sig->params [0] = &klass->byval_arg;
4406 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4408 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4409 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4410 res->type = STACK_OBJ;
4414 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4416 MONO_START_BB (cfg, end_bb);
4420 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4424 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one variant (co/contravariant) type parameter
 * instantiated with a reference type. Used to decide when a cast needs the
 * full variance-aware cache helpers.
 */
4430 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4433 MonoGenericContainer *container;
4434 MonoGenericInst *ginst;
4436 if (klass->generic_class) {
4437 container = klass->generic_class->container_class->generic_container;
4438 ginst = klass->generic_class->context.class_inst;
4439 } else if (klass->generic_container && context_used) {
4440 container = klass->generic_container;
4441 ginst = container->context.class_inst;
4446 for (i = 0; i < container->type_argc; ++i) {
4448 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4450 type = ginst->type_argv [i];
4451 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib classes whose icalls are direct-callable;
 * published with a memory barrier, then read without locking. */
4457 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly (without going
 * through a wrapper). Restricted to a whitelist of corlib classes plus
 * Math, because a direct call must not (transitively) raise a managed
 * exception via mono_raise_exception ().
 */
4460 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4462 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4463 if (!direct_icalls_enabled (cfg))
4467 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4468 * Whitelist a few icalls for now.
4470 if (!direct_icall_type_hash) {
4471 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4473 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4474 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4475 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4476 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before storing the global pointer. */
4477 mono_memory_barrier ();
4478 direct_icall_type_hash = h;
4481 if (cmethod->klass == mono_defaults.math_class)
4483 /* No locking needed */
4484 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* A cast is "complex" (needs the cache-based helper rather than inline
 * checks) when KLASS is an interface, array, nullable, marshalbyref,
 * sealed, or a generic type variable. NOTE: macro evaluates klass more
 * than once. */
4489 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_isinst_with_cache:
 *
 *   Emit a call to the isinst-with-cache marshal wrapper.
 * ARGS = { obj, klass, cache-slot address }.
 */
4492 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4494 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4495 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper, bracketed by
 * cast-detail save/reset so failures report both classes under
 * --debug=casts. ARGS = { obj, klass, cache-slot address }.
 */
4499 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4501 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4504 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4505 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4506 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a per-call-site id for a CASTCLASS_CACHE patch: the method index
 * in the high 16 bits, a per-cfg counter in the low 16.
 */
4512 get_castclass_cache_idx (MonoCompile *cfg)
4514 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4515 cfg->castclass_cache_index ++;
4516 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_isinst_with_cache_nonshared:
 *
 *   Non-shared variant: KLASS is a compile-time constant and the cache slot
 * is materialized as a runtime constant keyed by a fresh call-site id.
 */
4521 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4526 args [0] = obj; /* obj */
4527 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4529 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4530 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4532 return emit_isinst_with_cache (cfg, klass, args);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared castclass through the cache wrapper; mirrors
 * emit_isinst_with_cache_nonshared.
 */
4536 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4545 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4548 idx = get_castclass_cache_idx (cfg);
4549 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4551 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4552 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure. Complex casts (variance, interfaces,
 * arrays, etc.) go through the cache-based wrapper; simple casts get
 * inline vtable/class compares with a null-object bypass.
 */
4556 * Returns NULL and set the cfg exception on error.
4559 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4561 MonoBasicBlock *is_null_bb;
4562 int obj_reg = src->dreg;
4563 int vtable_reg = alloc_preg (cfg);
4564 MonoInst *klass_inst = NULL;
/* Casting a known null constant always succeeds. */
4566 if (src->opcode == OP_PCONST && src->inst_p0 == 0)
/* Complex or variant casts: use the castclass-with-cache wrapper. The
 * cache entry pair comes from the rgctx (CAST_CACHE). */
4572 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4573 MonoInst *cache_ins;
4575 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4580 /* klass - it's the second element of the cache entry*/
4581 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4584 args [2] = cache_ins;
4586 return emit_castclass_with_cache (cfg, klass, args);
4589 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Null objects skip the checks entirely. */
4592 NEW_BBLOCK (cfg, is_null_bb);
4594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4597 save_cast_details (cfg, klass, obj_reg, FALSE);
4599 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4601 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4603 int klass_reg = alloc_preg (cfg);
4605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes: a single class-pointer compare suffices. */
4607 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4608 /* the remoting code is broken, access the class for now */
4609 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4610 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4612 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4613 cfg->exception_ptr = klass;
4616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4621 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full castclass check over the class hierarchy. */
4623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4624 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4628 MONO_START_BB (cfg, is_null_bb);
4630 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' opcode: test whether SRC is an
 * instance of KLASS. The result register holds the object on success and
 * NULL on failure (false_bb stores 0 into res_reg; is_null_bb keeps the
 * OP_MOVE copy of the input). CONTEXT_USED is nonzero under generic
 * sharing, where KLASS is resolved through the RGCTX.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
4636 * Returns NULL and set the cfg exception on error.
4639 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4642 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4643 int obj_reg = src->dreg;
4644 int vtable_reg = alloc_preg (cfg);
4645 int res_reg = alloc_ireg_ref (cfg);
4646 MonoInst *klass_inst = NULL;
/* Variant or complex casts use the cached-cast helper. */
4651 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4652 MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4654 args [0] = src; /* obj */
4656 /* klass - it's the second element of the cache entry*/
4657 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4659 args [2] = cache_ins; /* cache */
4660 return emit_isinst_with_cache (cfg, klass, args);
4663 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4666 NEW_BBLOCK (cfg, is_null_bb);
4667 NEW_BBLOCK (cfg, false_bb);
4668 NEW_BBLOCK (cfg, end_bb);
4670 /* Do the assignment at the beginning, so the other assignment can be if converted */
4671 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4672 ins->type = STACK_OBJ;
/* isinst on null yields null: branch straight to the pass-through block. */
4675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4681 g_assert (!context_used);
4682 /* the is_null_bb target simply copies the input register to the output */
4683 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4685 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks, then check element-class compatibility. */
4688 int rank_reg = alloc_preg (cfg);
4689 int eclass_reg = alloc_preg (cfg);
4691 g_assert (!context_used);
4692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes related to System.Enum/object, whose
 * assignability rules differ from the generic subclass walk. */
4697 if (klass->cast_class == mono_defaults.object_class) {
4698 int parent_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4700 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4701 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4703 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4704 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4705 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4706 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4707 } else if (klass->cast_class == mono_defaults.enum_class) {
4708 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4710 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4711 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4713 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4714 /* Check that the object is a vector too */
4715 int bounds_reg = alloc_preg (cfg);
4716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4721 /* the is_null_bb target simply copies the input register to the output */
4722 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4724 } else if (mono_class_is_nullable (klass)) {
4725 g_assert (!context_used);
4726 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4727 /* the is_null_bb target simply copies the input register to the output */
4728 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed-class fast path; the vtable comparison variant is disabled
 * (see the if (0) FIXME below), so the class pointer is compared. */
4730 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4731 g_assert (!context_used);
4732 /* the remoting code is broken, access the class for now */
4733 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4734 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4736 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4737 cfg->exception_ptr = klass;
4740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4749 /* the is_null_bb target simply copies the input register to the output */
4750 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register becomes NULL. */
4755 MONO_START_BB (cfg, false_bb);
4757 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4760 MONO_START_BB (cfg, is_null_bb);
4762 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CEE_MONO_CISINST opcode, the remoting-aware
 * variant of 'isinst' that produces an integer code (see the comment
 * below) instead of an object reference. When DISABLE_REMOTING is
 * defined, transparent-proxy handling is compiled out and reaching the
 * non-interface proxy path aborts via g_error().
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
4768 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4770 /* This opcode takes as input an object reference and a class, and returns:
4771 0) if the object is an instance of the class,
4772 1) if the object is not instance of the class,
4773 2) if the object is a proxy whose type cannot be determined */
4776 #ifndef DISABLE_REMOTING
4777 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4779 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4781 int obj_reg = src->dreg;
4782 int dreg = alloc_ireg (cfg);
4784 #ifndef DISABLE_REMOTING
4785 int klass_reg = alloc_preg (cfg);
4788 NEW_BBLOCK (cfg, true_bb);
4789 NEW_BBLOCK (cfg, false_bb);
4790 NEW_BBLOCK (cfg, end_bb);
4791 #ifndef DISABLE_REMOTING
4792 NEW_BBLOCK (cfg, false2_bb);
4793 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null input is "not an instance" (result 1). */
4796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4799 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4800 #ifndef DISABLE_REMOTING
4801 NEW_BBLOCK (cfg, interface_fail_bb);
4804 tmp_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4806 #ifndef DISABLE_REMOTING
/* Interface check failed: see if the object is a transparent proxy
 * whose type info cannot be resolved (result 2). */
4807 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4808 MONO_START_BB (cfg, interface_fail_bb);
4809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4811 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4813 tmp_reg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4818 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4821 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish proxies from plain objects before
 * the ordinary subclass check. */
4822 tmp_reg = alloc_preg (cfg);
4823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4826 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4827 tmp_reg = alloc_preg (cfg);
4828 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4829 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4831 tmp_reg = alloc_preg (cfg);
4832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4834 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4836 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4837 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4839 MONO_START_BB (cfg, no_proxy_bb);
4841 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4843 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: 1 = not an instance, 2 = undeterminable proxy,
 * 0 = is an instance. */
4847 MONO_START_BB (cfg, false_bb);
4849 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4852 #ifndef DISABLE_REMOTING
4853 MONO_START_BB (cfg, false2_bb);
4855 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4856 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4859 MONO_START_BB (cfg, true_bb);
4861 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4863 MONO_START_BB (cfg, end_bb);
4866 MONO_INST_NEW (cfg, ins, OP_ICONST);
4868 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CEE_MONO_CCASTCLASS opcode, the
 * remoting-aware variant of 'castclass' returning an integer code (see
 * the comment below). With DISABLE_REMOTING defined the proxy paths are
 * compiled out and the non-interface proxy path aborts via g_error().
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
4874 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4876 /* This opcode takes as input an object reference and a class, and returns:
4877 0) if the object is an instance of the class,
4878 1) if the object is a proxy whose type cannot be determined
4879 an InvalidCastException exception is thrown otherwhise*/
4882 #ifndef DISABLE_REMOTING
4883 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4885 MonoBasicBlock *ok_result_bb;
4887 int obj_reg = src->dreg;
4888 int dreg = alloc_ireg (cfg);
4889 int tmp_reg = alloc_preg (cfg);
4891 #ifndef DISABLE_REMOTING
4892 int klass_reg = alloc_preg (cfg);
4893 NEW_BBLOCK (cfg, end_bb);
4896 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds (result 0). */
4898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4899 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record the cast for a descriptive InvalidCastException message. */
4901 save_cast_details (cfg, klass, obj_reg, FALSE);
4903 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4904 #ifndef DISABLE_REMOTING
4905 NEW_BBLOCK (cfg, interface_fail_bb);
4907 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy without resolvable
 * type info (result 1) avoids the InvalidCastException. */
4908 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4909 MONO_START_BB (cfg, interface_fail_bb);
4910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4912 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4914 tmp_reg = alloc_preg (cfg);
4915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4917 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4919 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4923 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4927 #ifndef DISABLE_REMOTING
/* Non-interface case: detect proxies before the normal cast check. */
4928 NEW_BBLOCK (cfg, no_proxy_bb);
4930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4932 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4934 tmp_reg = alloc_preg (cfg);
4935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4938 tmp_reg = alloc_preg (cfg);
4939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4943 NEW_BBLOCK (cfg, fail_1_bb);
4945 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4947 MONO_START_BB (cfg, fail_1_bb);
4949 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4952 MONO_START_BB (cfg, no_proxy_bb);
4954 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4956 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4960 MONO_START_BB (cfg, ok_result_bb);
4962 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4964 #ifndef DISABLE_REMOTING
4965 MONO_START_BB (cfg, end_bb);
4969 MONO_INST_NEW (cfg, ins, OP_ICONST);
4971 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline implementation of Enum.HasFlag () for enum KLASS:
 * computes (*enum_this & enum_flag) == enum_flag as a STACK_I4 boolean.
 * Uses 32-bit ops (OP_I*) or 64-bit ops (OP_L*) depending on the
 * underlying enum type; the decompose calls lower the ops for backends
 * that need it.
 * NOTE(review): interior lines are missing from this chunk (including the
 * is_i4 selection logic); comments describe only the visible code.
 */
4976 static G_GNUC_UNUSED MonoInst*
4977 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4979 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4980 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4983 switch (enum_type->type) {
4986 #if SIZEOF_REGISTER == 8
4998 MonoInst *load, *and_, *cmp, *ceq;
4999 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5000 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5001 int dest_reg = alloc_ireg (cfg);
/* load the enum value, AND with the flag, compare the masked value
 * against the flag, and set dest_reg to the equality result */
5003 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
5004 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
5005 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
5006 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
5008 ceq->type = STACK_I4;
/* lower composite ops for backends without direct support */
5011 load = mono_decompose_opcode (cfg, load);
5012 and_ = mono_decompose_opcode (cfg, and_);
5013 cmp = mono_decompose_opcode (cfg, cmp);
5014 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of KLASS bound to METHOD with
 * receiver TARGET, inlining the work of mono_delegate_ctor (). VIRTUAL_
 * selects the virtual-delegate trampoline; CONTEXT_USED is nonzero under
 * generic sharing (METHOD then resolved through the RGCTX). On llvm-only
 * the delegate is initialized via a JIT icall instead of trampolines.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
5022 * Returns NULL and set the cfg exception on error.
5024 static G_GNUC_UNUSED MonoInst*
5025 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5029 gpointer trampoline;
5030 MonoInst *obj, *method_ins, *tramp_ins;
5034 if (virtual_ && !cfg->llvm_only) {
5035 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5038 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5042 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5046 /* Inline the contents of mono_delegate_ctor */
5048 /* Set target field */
5049 /* Optimize away setting of NULL target */
5050 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* reference store into a heap object needs a write barrier */
5052 if (cfg->gen_write_barriers) {
5053 dreg = alloc_preg (cfg);
5054 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5055 emit_write_barrier (cfg, ptr, target);
5059 /* Set method field */
5060 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5064 * To avoid looking up the compiled code belonging to the target method
5065 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5066 * store it, and we fill it after the method has been compiled.
5068 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5069 MonoInst *code_slot_ins;
5072 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* lazily create the per-domain method -> code-slot hash under the
 * domain lock */
5074 domain = mono_domain_get ();
5075 mono_domain_lock (domain);
5076 if (!domain_jit_info (domain)->method_code_hash)
5077 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5078 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5080 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5081 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5083 mono_domain_unlock (domain);
5085 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate through a JIT icall instead of
 * trampolines */
5090 if (cfg->llvm_only) {
5091 MonoInst *args [16];
5096 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5097 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5100 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT references the trampoline through a patch-info constant; JIT can
 * create it eagerly and embed the pointer */
5106 if (cfg->compile_aot) {
5107 MonoDelegateClassMethodPair *del_tramp;
5109 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5110 del_tramp->klass = klass;
5111 del_tramp->method = context_used ? NULL : method;
5112 del_tramp->is_virtual = virtual_;
5113 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5116 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5118 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5119 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5122 /* Set invoke_impl field */
5124 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5126 dreg = alloc_preg (cfg);
5127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5130 dreg = alloc_preg (cfg);
5131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* record whether this is a virtual delegate in a one-byte field */
5135 dreg = alloc_preg (cfg);
5136 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5139 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va () icall to
 * implement 'newobj' on a multi-dimensional array constructor. SP holds
 * the dimension arguments; IP is the IL location. The vararg calling
 * convention this requires is unsupported by LLVM, so LLVM compilation
 * is disabled for the method.
 */
5145 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5147 MonoJitICallInfo *info;
5149 /* Need to register the icall so it gets an icall wrapper */
5150 info = mono_get_array_new_va_icall (rank);
5152 cfg->flags |= MONO_CFG_HAS_VARARGS;
5154 /* mono_array_new_va () needs a vararg calling convention */
5155 cfg->exception_message = g_strdup ("array-new");
5156 cfg->disable_llvm = TRUE;
5158 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5159 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * NOTE(review): interior lines of this function are missing from this
 * chunk; comments describe only the visible code.
 */
5163 * handle_constrained_gsharedvt_call:
5165 * Handle constrained calls where the receiver is a gsharedvt type.
5166 * Return the instruction representing the call. Set the cfg exception on failure.
5169 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5170 gboolean *ref_emit_widen)
5172 MonoInst *ins = NULL;
5173 gboolean emit_widen = *ref_emit_widen;
5176 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5177 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5178 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of signatures is supported by the icall path:
 * restricted receiver classes, simple return types, and at most one
 * reference/byref/gsharedvt parameter. */
5180 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5181 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5182 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5183 MonoInst *args [16];
5186 * This case handles calls to
5187 * - object:ToString()/Equals()/GetHashCode(),
5188 * - System.IComparable<T>:CompareTo()
5189 * - System.IEquatable<T>:Equals ()
5190 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
5194 if (mono_method_check_context_used (cmethod))
5195 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5197 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5198 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5200 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5201 if (fsig->hasthis && fsig->param_count) {
5202 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5203 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5204 ins->dreg = alloc_preg (cfg);
5205 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5206 MONO_ADD_INS (cfg->cbb, ins);
5209 if (mini_is_gsharedvt_type (fsig->params [0])) {
5210 int addr_reg, deref_arg_reg;
/* the icall needs to know at runtime whether the argument must be
 * dereferenced; derive that from the gsharedvt box-type info */
5212 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5213 deref_arg_reg = alloc_preg (cfg);
5214 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5215 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5217 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5218 addr_reg = ins->dreg;
5219 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5221 EMIT_NEW_ICONST (cfg, args [3], 0);
5222 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5225 EMIT_NEW_ICONST (cfg, args [3], 0);
5226 EMIT_NEW_ICONST (cfg, args [4], 0);
5228 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* the icall returns a boxed result; unbox it to match the declared
 * return type */
5231 if (mini_is_gsharedvt_type (fsig->ret)) {
5232 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5233 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5237 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5238 MONO_ADD_INS (cfg->cbb, add);
5240 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5241 MONO_ADD_INS (cfg->cbb, ins);
5242 /* ins represents the call result */
5245 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5248 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the
 * method's entry basic block (once per method — guarded by
 * got_var_allocated), and add a dummy use in the exit block so the
 * variable stays live for the whole method.
 */
5257 mono_emit_load_got_addr (MonoCompile *cfg)
5259 MonoInst *getaddr, *dummy_use;
5261 if (!cfg->got_var || cfg->got_var_allocated)
5264 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5265 getaddr->cil_code = cfg->header->code;
5266 getaddr->dreg = cfg->got_var->dreg;
5268 /* Add it to the start of the first bblock */
5269 if (cfg->bb_entry->code) {
5270 getaddr->next = cfg->bb_entry->code;
5271 cfg->bb_entry->code = getaddr;
5274 MONO_ADD_INS (cfg->bb_entry, getaddr);
5276 cfg->got_var_allocated = TRUE;
5279 * Add a dummy use to keep the got_var alive, since real uses might
5280 * only be generated by the back ends.
5281 * Add it to end_bblock, so the variable's lifetime covers the whole
5283 * It would be better to make the usage of the got var explicit in all
5284 * cases when the backend needs it (i.e. calls, throw etc.), so this
5285 * wouldn't be needed.
5287 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5288 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via the
 * MONO_INLINELIMIT environment variable (see below).
 * NOTE(review): not obviously thread-safe — initialization is unguarded;
 * presumably callers serialize. Confirm before relying on it. */
5291 static int inline_limit;
5292 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled
 * in CFG. Checks (visible here): explicit disable flags, inline depth,
 * header summary availability, NoInlining/Synchronized flags,
 * MarshalByRef classes, the size limit (bypassed by
 * AggressiveInlining), class-cctor constraints, soft-float restrictions
 * on R4 parameters/returns, and the cfg->dont_inline list.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
5295 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5297 MonoMethodHeaderSummary header;
5299 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5300 MonoMethodSignature *sig = mono_method_signature (method);
5304 if (cfg->disable_inline)
5309 if (cfg->inline_depth > 10)
5312 if (!mono_method_get_header_summary (method, &header))
5315 /*runtime, icall and pinvoke are checked by summary call*/
5316 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5317 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5318 (mono_class_is_marshalbyref (method->klass)) ||
5322 /* also consider num_locals? */
5323 /* Do the size check early to avoid creating vtables */
5324 if (!inline_limit_inited) {
5325 if (g_getenv ("MONO_INLINELIMIT"))
5326 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5328 inline_limit = INLINE_LENGTH_LIMIT;
5329 inline_limit_inited = TRUE;
5331 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5335 * if we can initialize the class of the method right away, we do,
5336 * otherwise we don't allow inlining if the class needs initialization,
5337 * since it would mean inserting a call to mono_runtime_class_init()
5338 * inside the inlined code
5340 if (!(cfg->opt & MONO_OPT_SHARED)) {
5341 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5342 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5343 vtable = mono_class_vtable (cfg->domain, method->klass);
5346 if (!cfg->compile_aot) {
5348 if (!mono_runtime_class_init_full (vtable, &error)) {
5349 mono_error_cleanup (&error);
5353 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5354 if (cfg->run_cctors && method->klass->has_cctor) {
5355 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5356 if (!method->klass->runtime_info)
5357 /* No vtable created yet */
5359 vtable = mono_class_vtable (cfg->domain, method->klass);
5362 /* This makes so that inline cannot trigger */
5363 /* .cctors: too many apps depend on them */
5364 /* running with a specific order... */
5365 if (! vtable->initialized)
5368 if (!mono_runtime_class_init_full (vtable, &error)) {
5369 mono_error_cleanup (&error);
5373 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5374 if (!method->klass->runtime_info)
5375 /* No vtable created yet */
5377 vtable = mono_class_vtable (cfg->domain, method->klass);
5380 if (!vtable->initialized)
5385 * If we're compiling for shared code
5386 * the cctor will need to be run at aot method load time, for example,
5387 * or at the end of the compilation of the inlining method.
5389 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* soft-float targets cannot inline methods taking or returning R4 */
5393 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5394 if (mono_arch_is_soft_float ()) {
5396 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5398 for (i = 0; i < sig->param_count; ++i)
5399 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5404 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access to KLASS from METHOD requires
 * emitting a class-initialization (cctor) check. Already-initialized
 * vtables (JIT only), BeforeFieldInit self-access, classes with no
 * cctor to run, and instance methods of the class itself are exempt.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
5411 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5413 if (!cfg->compile_aot) {
5415 if (vtable->initialized)
5419 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5420 if (cfg->method == method)
5424 if (!mono_class_needs_cctor_run (klass, method))
5427 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5428 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element:
 * &ARR->vector [INDEX * element_size]. BCHECK requests a bounds check
 * against MonoArray::max_length. For gsharedvt variable-size element
 * classes the element size is fetched from the runtime-generic context
 * instead of being a compile-time constant. Returns the STACK_MP address
 * instruction.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
5435 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5439 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5442 if (mini_is_gsharedvt_variable_klass (klass)) {
5445 mono_class_init (klass);
5446 size = mono_class_array_element_size (klass);
5449 mult_reg = alloc_preg (cfg);
5450 array_reg = arr->dreg;
5451 index_reg = index->dreg;
5453 #if SIZEOF_REGISTER == 8
5454 /* The array reg is 64 bits but the index reg is only 32 */
5455 if (COMPILE_LLVM (cfg)) {
5457 index2_reg = index_reg;
5459 index2_reg = alloc_preg (cfg);
5460 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5463 if (index->type == STACK_I8) {
5464 index2_reg = alloc_preg (cfg);
5465 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5467 index2_reg = index_reg;
5472 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5474 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* power-of-two element sizes can be folded into a single LEA on x86 */
5475 if (size == 1 || size == 2 || size == 4 || size == 8) {
5476 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5478 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5479 ins->klass = mono_class_get_element_class (klass);
5480 ins->type = STACK_MP;
5486 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size comes from the RGCTX at runtime */
5489 MonoInst *rgctx_ins;
5492 g_assert (cfg->gshared);
5493 context_used = mini_class_check_context_used (cfg, klass);
5494 g_assert (context_used);
5495 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5496 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5501 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5502 ins->klass = mono_class_get_element_class (klass);
5503 ins->type = STACK_MP;
5504 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a two-dimensional
 * array with explicit bounds: range-check both indexes against the
 * per-dimension lower_bound/length stored in MonoArray::bounds, then
 * compute &arr->vector [((idx1 - low1) * len2 + (idx2 - low2)) * size].
 * Throws IndexOutOfRangeException on a failed check. Returns the
 * STACK_MP address instruction.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
5510 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5512 int bounds_reg = alloc_preg (cfg);
5513 int add_reg = alloc_ireg_mp (cfg);
5514 int mult_reg = alloc_preg (cfg);
5515 int mult2_reg = alloc_preg (cfg);
5516 int low1_reg = alloc_preg (cfg);
5517 int low2_reg = alloc_preg (cfg);
5518 int high1_reg = alloc_preg (cfg);
5519 int high2_reg = alloc_preg (cfg);
5520 int realidx1_reg = alloc_preg (cfg);
5521 int realidx2_reg = alloc_preg (cfg);
5522 int sum_reg = alloc_preg (cfg);
5523 int index1, index2, tmpreg;
5527 mono_class_init (klass);
5528 size = mono_class_array_element_size (klass);
5530 index1 = index_ins1->dreg;
5531 index2 = index_ins2->dreg;
5533 #if SIZEOF_REGISTER == 8
5534 /* The array reg is 64 bits but the index reg is only 32 */
5535 if (COMPILE_LLVM (cfg)) {
5538 tmpreg = alloc_preg (cfg);
5539 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5541 tmpreg = alloc_preg (cfg);
5542 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5546 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5550 /* range checking */
5551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5552 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* dimension 1: realidx1 = index1 - lower_bound; unsigned compare
 * against length catches both negative and too-large indexes */
5554 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5555 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5556 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5557 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5558 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5560 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* dimension 2: same check against the second MonoArrayBounds entry */
5562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5563 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5564 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5565 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5566 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5567 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5568 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * len2 + realidx2) * size + vector offset */
5570 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5571 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5573 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5574 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5576 ins->type = STACK_MP;
5578 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the address of an array element for an Address/Get/Set accessor
 * call 'cmethod'. Fast paths: rank 1 uses ldelema_1; rank 2 uses
 * ldelema_2 when OP_LMUL is available and intrinsics are enabled.
 * Otherwise falls back to calling a marshalling Address() wrapper.
 * For a setter, the trailing value argument is not an index, hence the
 * param_count adjustment.
 */
5584 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5588 MonoMethod *addr_method;
5590 MonoClass *eclass = cmethod->klass->element_class;
5592 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank-1 fast path (bounds check enabled). */
5595 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5597 /* emit_ldelema_2 depends on OP_LMUL */
5598 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5599 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Variable-size gsharedvt element types can't use the generic helper. */
5602 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the marshal-generated Address wrapper. */
5605 element_size = mono_class_array_element_size (eclass);
5606 addr_method = mono_marshal_get_array_address (rank, element_size);
5607 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5612 static MonoBreakPolicy
5613 always_insert_breakpoint (MonoMethod *method)
5615 return MONO_BREAK_POLICY_ALWAYS;
/* Current break policy callback; defaults to always inserting breakpoints. */
5618 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5621 * mono_set_break_policy:
5622 * policy_callback: the new callback function
5624 * Allow embedders to decide whether to actually obey breakpoint instructions
5625 * (both break IL instructions and Debugger.Break () method calls), for example
5626 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5627 * untrusted or semi-trusted code.
5629 * @policy_callback will be called every time a break point instruction needs to
5630 * be inserted with the method argument being the method that calls Debugger.Break()
5631 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5632 * if it wants the breakpoint to not be effective in the given method.
5633 * #MONO_BREAK_POLICY_ALWAYS is the default.
5636 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5638 if (policy_callback)
5639 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
5641 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — the misspelling is part of the public
 * name; callers elsewhere use it, so it cannot be renamed here.)
 * Ask the installed break policy whether a breakpoint for 'method'
 * should actually be emitted.
 */
5645 should_insert_brekpoint (MonoMethod *method) {
5646 switch (break_policy_func (method)) {
5647 case MONO_BREAK_POLICY_ALWAYS:
5649 case MONO_BREAK_POLICY_NEVER:
5651 case MONO_BREAK_POLICY_ON_DBG:
/* The managed debugger (mdb) path was removed; warn and fall through. */
5652 g_warning ("mdb no longer supported");
/* Unknown value from an embedder-supplied callback. */
5655 g_warning ("Incorrect value returned from break policy callback");
5660 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store through the element address. args: [0]=array, [1]=index,
 * [2]=value ref. 'is_set' selects the store direction.
 */
5662 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5664 MonoInst *addr, *store, *load;
5665 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5667 /* the bounds check is already done by the callers */
5668 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the element slot. */
5670 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5671 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Reference elements need a GC write barrier on the store. */
5672 if (mini_type_is_reference (fsig->params [2]))
5673 emit_write_barrier (cfg, addr, load);
/* Get: copy the element into *args[2]. */
5675 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5676 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether 'klass' is a reference type (in the possibly-generic-sharing sense). */
5683 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5685 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp[2] into array sp[0] at index sp[1].
 * With safety checks and a reference element type (and a non-null
 * value), go through the virtual stelemref helper, which performs the
 * array covariance check. Otherwise store directly through the element
 * address, with special cases for gsharedvt elements and constant
 * indexes.
 */
5689 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5691 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5692 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5693 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5694 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5695 MonoInst *iargs [3];
/* The helper is dispatched virtually; its vtable slot must exist. */
5698 mono_class_setup_vtable (obj_array);
5699 g_assert (helper->slot);
5701 if (sp [0]->type != STACK_OBJ)
5703 if (sp [2]->type != STACK_OBJ)
5710 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size (gsharedvt) element: store as a value type through the
 * computed address. */
5714 if (mini_is_gsharedvt_variable_klass (klass)) {
5717 // FIXME-VT: OP_ICONST optimization
5718 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5719 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5720 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
5721 } else if (sp [1]->opcode == OP_ICONST) {
5722 int array_reg = sp [0]->dreg;
5723 int index_reg = sp [1]->dreg;
5724 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* 64-bit LLVM: widen the 32-bit index register before addressing. */
5726 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5727 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5730 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store through it. */
5733 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5734 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5735 if (generic_class_is_reference_type (cfg, klass))
5736 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline Array.UnsafeLoad/UnsafeStore: element access with bounds /
 * safety checks disabled. The element type comes from params[2] for a
 * store and from the return type for a load.
 */
5743 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5748 eklass = mono_class_from_mono_type (fsig->params [2]);
5750 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety_checks == FALSE. */
5753 return emit_array_store (cfg, eklass, args, FALSE);
5755 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5756 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether Array.UnsafeMov<S,R> can be compiled as a plain
 * register move from 'param_klass' to 'return_klass'. Rejects
 * ref/value mixes, types with GC references, struct/scalar mixes,
 * floating point, and size mismatches that cannot be widened.
 *
 * FIX(review): the '&param_klass' token had been corrupted into
 * '¶m_klass' by an HTML-entity mangling (&para; -> ¶) in four places;
 * restored to the address-of expression.
 */
5762 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5765 int param_size, return_size;
/* Strip enum/generic wrappers down to the underlying types. */
5767 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5768 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5770 if (cfg->verbose_level > 3)
5771 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5773 //Don't allow mixing reference types with value types
5774 if (param_klass->valuetype != return_klass->valuetype) {
5775 if (cfg->verbose_level > 3)
5776 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5780 if (!param_klass->valuetype) {
5781 if (cfg->verbose_level > 3)
5782 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* GC-tracked fields make a raw bit move unsafe. */
5787 if (param_klass->has_references || return_klass->has_references)
5790 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5791 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5792 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5793 if (cfg->verbose_level > 3)
5794 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Float registers are separate from integer registers; no raw move. */
5798 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5799 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5800 if (cfg->verbose_level > 3)
5801 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5805 param_size = mono_class_value_size (param_klass, &align);
5806 return_size = mono_class_value_size (return_klass, &align);
5808 //We can do it if sizes match
5809 if (param_size == return_size) {
5810 if (cfg->verbose_level > 3)
5811 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5815 //No simple way to handle struct if sizes don't match
5816 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5817 if (cfg->verbose_level > 3)
5818 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5823 * Same reg size category.
5824 * A quick note on why we don't require widening here.
5825 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5827 * Since the source value comes from a function argument, the JIT will already have
5828 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5830 if (param_size <= 4 && return_size <= 4) {
5831 if (cfg->verbose_level > 3)
5832 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Inline Array.UnsafeMov<S,R> when the source and destination types are
 * bit-compatible (see is_unsafe_mov_compatible), either directly or as
 * rank-1 arrays of compatible element types. Bails for gsharedvt
 * variable return types.
 */
5840 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5842 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5843 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5845 if (mini_is_gsharedvt_variable_type (fsig->ret))
5848 //Valuetypes that are semantically equivalent or numbers than can be widened to
5849 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5852 //Arrays of valuetypes that are semantically equivalent
5853 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Intrinsic expansion hook for constructor calls: try SIMD intrinsics
 * first (when enabled and supported), then native-types intrinsics.
 * Returns the emitted instruction, or falls through to the generic path.
 */
5860 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5862 #ifdef MONO_ARCH_SIMD_INTRINSICS
5863 MonoInst *ins = NULL;
5865 if (cfg->opt & MONO_OPT_SIMD) {
5866 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5872 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 * Emit an OP_MEMORY_BARRIER instruction of the given kind
 * (e.g. MONO_MEMORY_BARRIER_SEQ) into the current basic block.
 */
5876 emit_memory_barrier (MonoCompile *cfg, int kind)
5878 MonoInst *ins = NULL;
5879 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5880 MONO_ADD_INS (cfg->cbb, ins);
5881 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * Intrinsic expansion used when compiling with the LLVM backend.
 * Handles System.Math Sin/Cos/Sqrt/Abs(double) as single-operand R8
 * opcodes, and Min/Max over I4/U4/I8/U8 as cmov-style min/max opcodes
 * when MONO_OPT_CMOV is enabled.
 */
5887 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5889 MonoInst *ins = NULL;
5892 /* The LLVM backend supports these intrinsics */
5893 if (cmethod->klass == mono_defaults.math_class) {
5894 if (strcmp (cmethod->name, "Sin") == 0) {
5896 } else if (strcmp (cmethod->name, "Cos") == 0) {
5898 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5900 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one R8 source, fresh R8 destination. */
5904 if (opcode && fsig->param_count == 1) {
5905 MONO_INST_NEW (cfg, ins, opcode);
5906 ins->type = STACK_R8;
5907 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5908 ins->sreg1 = args [0]->dreg;
5909 MONO_ADD_INS (cfg->cbb, ins);
5913 if (cfg->opt & MONO_OPT_CMOV) {
5914 if (strcmp (cmethod->name, "Min") == 0) {
5915 if (fsig->params [0]->type == MONO_TYPE_I4)
5917 if (fsig->params [0]->type == MONO_TYPE_U4)
5918 opcode = OP_IMIN_UN;
5919 else if (fsig->params [0]->type == MONO_TYPE_I8)
5921 else if (fsig->params [0]->type == MONO_TYPE_U8)
5922 opcode = OP_LMIN_UN;
5923 } else if (strcmp (cmethod->name, "Max") == 0) {
5924 if (fsig->params [0]->type == MONO_TYPE_I4)
5926 if (fsig->params [0]->type == MONO_TYPE_U4)
5927 opcode = OP_IMAX_UN;
5928 else if (fsig->params [0]->type == MONO_TYPE_I8)
5930 else if (fsig->params [0]->type == MONO_TYPE_U8)
5931 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the operand width. */
5935 if (opcode && fsig->param_count == 2) {
5936 MONO_INST_NEW (cfg, ins, opcode);
5937 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5938 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5939 ins->sreg1 = args [0]->dreg;
5940 ins->sreg2 = args [1]->dreg;
5941 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe to expand even in generic-sharing-compatible
 * code: the Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5949 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5951 if (cmethod->klass == mono_defaults.array_class) {
5952 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5953 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5954 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5955 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5956 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5957 return emit_array_unsafe_mov (cfg, fsig, args);
5964 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5966 MonoInst *ins = NULL;
5968 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5970 if (cmethod->klass == mono_defaults.string_class) {
5971 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5972 int dreg = alloc_ireg (cfg);
5973 int index_reg = alloc_preg (cfg);
5974 int add_reg = alloc_preg (cfg);
5976 #if SIZEOF_REGISTER == 8
5977 if (COMPILE_LLVM (cfg)) {
5978 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5980 /* The array reg is 64 bits but the index reg is only 32 */
5981 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5984 index_reg = args [1]->dreg;
5986 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5988 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5989 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5990 add_reg = ins->dreg;
5991 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5994 int mult_reg = alloc_preg (cfg);
5995 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5996 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5997 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5998 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
6000 type_from_op (cfg, ins, NULL, NULL);
6002 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6003 int dreg = alloc_ireg (cfg);
6004 /* Decompose later to allow more optimizations */
6005 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
6006 ins->type = STACK_I4;
6007 ins->flags |= MONO_INST_FAULT;
6008 cfg->cbb->has_array_access = TRUE;
6009 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
6014 } else if (cmethod->klass == mono_defaults.object_class) {
6015 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
6016 int dreg = alloc_ireg_ref (cfg);
6017 int vt_reg = alloc_preg (cfg);
6018 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6019 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6020 type_from_op (cfg, ins, NULL, NULL);
6023 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6024 int dreg = alloc_ireg (cfg);
6025 int t1 = alloc_ireg (cfg);
6027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6028 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6029 ins->type = STACK_I4;
6032 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6033 MONO_INST_NEW (cfg, ins, OP_NOP);
6034 MONO_ADD_INS (cfg->cbb, ins);
6038 } else if (cmethod->klass == mono_defaults.array_class) {
6039 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6040 return emit_array_generic_access (cfg, fsig, args, FALSE);
6041 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6042 return emit_array_generic_access (cfg, fsig, args, TRUE);
6044 #ifndef MONO_BIG_ARRAYS
6046 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6049 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6050 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6051 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6052 int dreg = alloc_ireg (cfg);
6053 int bounds_reg = alloc_ireg_mp (cfg);
6054 MonoBasicBlock *end_bb, *szarray_bb;
6055 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6057 NEW_BBLOCK (cfg, end_bb);
6058 NEW_BBLOCK (cfg, szarray_bb);
6060 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6061 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6063 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6064 /* Non-szarray case */
6066 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6067 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6069 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6070 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6072 MONO_START_BB (cfg, szarray_bb);
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6076 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6078 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6079 MONO_START_BB (cfg, end_bb);
6081 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6082 ins->type = STACK_I4;
6088 if (cmethod->name [0] != 'g')
6091 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6092 int dreg = alloc_ireg (cfg);
6093 int vtable_reg = alloc_preg (cfg);
6094 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6095 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6096 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6097 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6098 type_from_op (cfg, ins, NULL, NULL);
6101 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6102 int dreg = alloc_ireg (cfg);
6104 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6105 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6106 type_from_op (cfg, ins, NULL, NULL);
6111 } else if (cmethod->klass == runtime_helpers_class) {
6112 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6113 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6117 } else if (cmethod->klass == mono_defaults.monitor_class) {
6118 gboolean is_enter = FALSE;
6120 if (!strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1)
6125 * To make async stack traces work, icalls which can block should have a wrapper.
6126 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6128 MonoBasicBlock *end_bb;
6130 NEW_BBLOCK (cfg, end_bb);
6132 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter_fast, args);
6133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6135 ins = mono_emit_jit_icall (cfg, (gpointer)mono_monitor_enter, args);
6136 MONO_START_BB (cfg, end_bb);
6139 } else if (cmethod->klass == mono_defaults.thread_class) {
6140 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6141 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6142 MONO_ADD_INS (cfg->cbb, ins);
6144 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6145 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6146 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6148 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6150 if (fsig->params [0]->type == MONO_TYPE_I1)
6151 opcode = OP_LOADI1_MEMBASE;
6152 else if (fsig->params [0]->type == MONO_TYPE_U1)
6153 opcode = OP_LOADU1_MEMBASE;
6154 else if (fsig->params [0]->type == MONO_TYPE_I2)
6155 opcode = OP_LOADI2_MEMBASE;
6156 else if (fsig->params [0]->type == MONO_TYPE_U2)
6157 opcode = OP_LOADU2_MEMBASE;
6158 else if (fsig->params [0]->type == MONO_TYPE_I4)
6159 opcode = OP_LOADI4_MEMBASE;
6160 else if (fsig->params [0]->type == MONO_TYPE_U4)
6161 opcode = OP_LOADU4_MEMBASE;
6162 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6163 opcode = OP_LOADI8_MEMBASE;
6164 else if (fsig->params [0]->type == MONO_TYPE_R4)
6165 opcode = OP_LOADR4_MEMBASE;
6166 else if (fsig->params [0]->type == MONO_TYPE_R8)
6167 opcode = OP_LOADR8_MEMBASE;
6168 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6169 opcode = OP_LOAD_MEMBASE;
6172 MONO_INST_NEW (cfg, ins, opcode);
6173 ins->inst_basereg = args [0]->dreg;
6174 ins->inst_offset = 0;
6175 MONO_ADD_INS (cfg->cbb, ins);
6177 switch (fsig->params [0]->type) {
6184 ins->dreg = mono_alloc_ireg (cfg);
6185 ins->type = STACK_I4;
6189 ins->dreg = mono_alloc_lreg (cfg);
6190 ins->type = STACK_I8;
6194 ins->dreg = mono_alloc_ireg (cfg);
6195 #if SIZEOF_REGISTER == 8
6196 ins->type = STACK_I8;
6198 ins->type = STACK_I4;
6203 ins->dreg = mono_alloc_freg (cfg);
6204 ins->type = STACK_R8;
6207 g_assert (mini_type_is_reference (fsig->params [0]));
6208 ins->dreg = mono_alloc_ireg_ref (cfg);
6209 ins->type = STACK_OBJ;
6213 if (opcode == OP_LOADI8_MEMBASE)
6214 ins = mono_decompose_opcode (cfg, ins);
6216 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6220 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6222 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6224 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6225 opcode = OP_STOREI1_MEMBASE_REG;
6226 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6227 opcode = OP_STOREI2_MEMBASE_REG;
6228 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6229 opcode = OP_STOREI4_MEMBASE_REG;
6230 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6231 opcode = OP_STOREI8_MEMBASE_REG;
6232 else if (fsig->params [0]->type == MONO_TYPE_R4)
6233 opcode = OP_STORER4_MEMBASE_REG;
6234 else if (fsig->params [0]->type == MONO_TYPE_R8)
6235 opcode = OP_STORER8_MEMBASE_REG;
6236 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6237 opcode = OP_STORE_MEMBASE_REG;
6240 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6242 MONO_INST_NEW (cfg, ins, opcode);
6243 ins->sreg1 = args [1]->dreg;
6244 ins->inst_destbasereg = args [0]->dreg;
6245 ins->inst_offset = 0;
6246 MONO_ADD_INS (cfg->cbb, ins);
6248 if (opcode == OP_STOREI8_MEMBASE_REG)
6249 ins = mono_decompose_opcode (cfg, ins);
6254 } else if (cmethod->klass->image == mono_defaults.corlib &&
6255 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6256 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6259 #if SIZEOF_REGISTER == 8
6260 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6261 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6262 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6263 ins->dreg = mono_alloc_preg (cfg);
6264 ins->sreg1 = args [0]->dreg;
6265 ins->type = STACK_I8;
6266 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6267 MONO_ADD_INS (cfg->cbb, ins);
6271 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6273 /* 64 bit reads are already atomic */
6274 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6275 load_ins->dreg = mono_alloc_preg (cfg);
6276 load_ins->inst_basereg = args [0]->dreg;
6277 load_ins->inst_offset = 0;
6278 load_ins->type = STACK_I8;
6279 MONO_ADD_INS (cfg->cbb, load_ins);
6281 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6288 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6289 MonoInst *ins_iconst;
6292 if (fsig->params [0]->type == MONO_TYPE_I4) {
6293 opcode = OP_ATOMIC_ADD_I4;
6294 cfg->has_atomic_add_i4 = TRUE;
6296 #if SIZEOF_REGISTER == 8
6297 else if (fsig->params [0]->type == MONO_TYPE_I8)
6298 opcode = OP_ATOMIC_ADD_I8;
6301 if (!mono_arch_opcode_supported (opcode))
6303 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6304 ins_iconst->inst_c0 = 1;
6305 ins_iconst->dreg = mono_alloc_ireg (cfg);
6306 MONO_ADD_INS (cfg->cbb, ins_iconst);
6308 MONO_INST_NEW (cfg, ins, opcode);
6309 ins->dreg = mono_alloc_ireg (cfg);
6310 ins->inst_basereg = args [0]->dreg;
6311 ins->inst_offset = 0;
6312 ins->sreg2 = ins_iconst->dreg;
6313 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6314 MONO_ADD_INS (cfg->cbb, ins);
6316 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6317 MonoInst *ins_iconst;
6320 if (fsig->params [0]->type == MONO_TYPE_I4) {
6321 opcode = OP_ATOMIC_ADD_I4;
6322 cfg->has_atomic_add_i4 = TRUE;
6324 #if SIZEOF_REGISTER == 8
6325 else if (fsig->params [0]->type == MONO_TYPE_I8)
6326 opcode = OP_ATOMIC_ADD_I8;
6329 if (!mono_arch_opcode_supported (opcode))
6331 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6332 ins_iconst->inst_c0 = -1;
6333 ins_iconst->dreg = mono_alloc_ireg (cfg);
6334 MONO_ADD_INS (cfg->cbb, ins_iconst);
6336 MONO_INST_NEW (cfg, ins, opcode);
6337 ins->dreg = mono_alloc_ireg (cfg);
6338 ins->inst_basereg = args [0]->dreg;
6339 ins->inst_offset = 0;
6340 ins->sreg2 = ins_iconst->dreg;
6341 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6342 MONO_ADD_INS (cfg->cbb, ins);
6344 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6347 if (fsig->params [0]->type == MONO_TYPE_I4) {
6348 opcode = OP_ATOMIC_ADD_I4;
6349 cfg->has_atomic_add_i4 = TRUE;
6351 #if SIZEOF_REGISTER == 8
6352 else if (fsig->params [0]->type == MONO_TYPE_I8)
6353 opcode = OP_ATOMIC_ADD_I8;
6356 if (!mono_arch_opcode_supported (opcode))
6358 MONO_INST_NEW (cfg, ins, opcode);
6359 ins->dreg = mono_alloc_ireg (cfg);
6360 ins->inst_basereg = args [0]->dreg;
6361 ins->inst_offset = 0;
6362 ins->sreg2 = args [1]->dreg;
6363 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6364 MONO_ADD_INS (cfg->cbb, ins);
6367 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6368 MonoInst *f2i = NULL, *i2f;
6369 guint32 opcode, f2i_opcode, i2f_opcode;
6370 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6371 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6373 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6374 fsig->params [0]->type == MONO_TYPE_R4) {
6375 opcode = OP_ATOMIC_EXCHANGE_I4;
6376 f2i_opcode = OP_MOVE_F_TO_I4;
6377 i2f_opcode = OP_MOVE_I4_TO_F;
6378 cfg->has_atomic_exchange_i4 = TRUE;
6380 #if SIZEOF_REGISTER == 8
6382 fsig->params [0]->type == MONO_TYPE_I8 ||
6383 fsig->params [0]->type == MONO_TYPE_R8 ||
6384 fsig->params [0]->type == MONO_TYPE_I) {
6385 opcode = OP_ATOMIC_EXCHANGE_I8;
6386 f2i_opcode = OP_MOVE_F_TO_I8;
6387 i2f_opcode = OP_MOVE_I8_TO_F;
6390 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6391 opcode = OP_ATOMIC_EXCHANGE_I4;
6392 cfg->has_atomic_exchange_i4 = TRUE;
6398 if (!mono_arch_opcode_supported (opcode))
6402 /* TODO: Decompose these opcodes instead of bailing here. */
6403 if (COMPILE_SOFT_FLOAT (cfg))
6406 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6407 f2i->dreg = mono_alloc_ireg (cfg);
6408 f2i->sreg1 = args [1]->dreg;
6409 if (f2i_opcode == OP_MOVE_F_TO_I4)
6410 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6411 MONO_ADD_INS (cfg->cbb, f2i);
6414 MONO_INST_NEW (cfg, ins, opcode);
6415 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6416 ins->inst_basereg = args [0]->dreg;
6417 ins->inst_offset = 0;
6418 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6419 MONO_ADD_INS (cfg->cbb, ins);
6421 switch (fsig->params [0]->type) {
6423 ins->type = STACK_I4;
6426 ins->type = STACK_I8;
6429 #if SIZEOF_REGISTER == 8
6430 ins->type = STACK_I8;
6432 ins->type = STACK_I4;
6437 ins->type = STACK_R8;
6440 g_assert (mini_type_is_reference (fsig->params [0]));
6441 ins->type = STACK_OBJ;
6446 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6447 i2f->dreg = mono_alloc_freg (cfg);
6448 i2f->sreg1 = ins->dreg;
6449 i2f->type = STACK_R8;
6450 if (i2f_opcode == OP_MOVE_I4_TO_F)
6451 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6452 MONO_ADD_INS (cfg->cbb, i2f);
6457 if (cfg->gen_write_barriers && is_ref)
6458 emit_write_barrier (cfg, args [0], args [1]);
6460 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6461 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6462 guint32 opcode, f2i_opcode, i2f_opcode;
6463 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6464 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6466 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6467 fsig->params [1]->type == MONO_TYPE_R4) {
6468 opcode = OP_ATOMIC_CAS_I4;
6469 f2i_opcode = OP_MOVE_F_TO_I4;
6470 i2f_opcode = OP_MOVE_I4_TO_F;
6471 cfg->has_atomic_cas_i4 = TRUE;
6473 #if SIZEOF_REGISTER == 8
6475 fsig->params [1]->type == MONO_TYPE_I8 ||
6476 fsig->params [1]->type == MONO_TYPE_R8 ||
6477 fsig->params [1]->type == MONO_TYPE_I) {
6478 opcode = OP_ATOMIC_CAS_I8;
6479 f2i_opcode = OP_MOVE_F_TO_I8;
6480 i2f_opcode = OP_MOVE_I8_TO_F;
6483 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6484 opcode = OP_ATOMIC_CAS_I4;
6485 cfg->has_atomic_cas_i4 = TRUE;
6491 if (!mono_arch_opcode_supported (opcode))
6495 /* TODO: Decompose these opcodes instead of bailing here. */
6496 if (COMPILE_SOFT_FLOAT (cfg))
6499 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6500 f2i_new->dreg = mono_alloc_ireg (cfg);
6501 f2i_new->sreg1 = args [1]->dreg;
6502 if (f2i_opcode == OP_MOVE_F_TO_I4)
6503 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6504 MONO_ADD_INS (cfg->cbb, f2i_new);
6506 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6507 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6508 f2i_cmp->sreg1 = args [2]->dreg;
6509 if (f2i_opcode == OP_MOVE_F_TO_I4)
6510 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6511 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6514 MONO_INST_NEW (cfg, ins, opcode);
6515 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6516 ins->sreg1 = args [0]->dreg;
6517 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6518 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6519 MONO_ADD_INS (cfg->cbb, ins);
6521 switch (fsig->params [1]->type) {
6523 ins->type = STACK_I4;
6526 ins->type = STACK_I8;
6529 #if SIZEOF_REGISTER == 8
6530 ins->type = STACK_I8;
6532 ins->type = STACK_I4;
6536 ins->type = cfg->r4_stack_type;
6539 ins->type = STACK_R8;
6542 g_assert (mini_type_is_reference (fsig->params [1]));
6543 ins->type = STACK_OBJ;
6548 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6549 i2f->dreg = mono_alloc_freg (cfg);
6550 i2f->sreg1 = ins->dreg;
6551 i2f->type = STACK_R8;
6552 if (i2f_opcode == OP_MOVE_I4_TO_F)
6553 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6554 MONO_ADD_INS (cfg->cbb, i2f);
6559 if (cfg->gen_write_barriers && is_ref)
6560 emit_write_barrier (cfg, args [0], args [1]);
6562 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6563 fsig->params [1]->type == MONO_TYPE_I4) {
6564 MonoInst *cmp, *ceq;
6566 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6569 /* int32 r = CAS (location, value, comparand); */
6570 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6571 ins->dreg = alloc_ireg (cfg);
6572 ins->sreg1 = args [0]->dreg;
6573 ins->sreg2 = args [1]->dreg;
6574 ins->sreg3 = args [2]->dreg;
6575 ins->type = STACK_I4;
6576 MONO_ADD_INS (cfg->cbb, ins);
6578 /* bool result = r == comparand; */
6579 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6580 cmp->sreg1 = ins->dreg;
6581 cmp->sreg2 = args [2]->dreg;
6582 cmp->type = STACK_I4;
6583 MONO_ADD_INS (cfg->cbb, cmp);
6585 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6586 ceq->dreg = alloc_ireg (cfg);
6587 ceq->type = STACK_I4;
6588 MONO_ADD_INS (cfg->cbb, ceq);
6590 /* *success = result; */
6591 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6593 cfg->has_atomic_cas_i4 = TRUE;
6595 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6596 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6600 } else if (cmethod->klass->image == mono_defaults.corlib &&
6601 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6602 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6605 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6607 MonoType *t = fsig->params [0];
6609 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6611 g_assert (t->byref);
6612 /* t is a byref type, so the reference check is more complicated */
6613 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6614 if (t->type == MONO_TYPE_I1)
6615 opcode = OP_ATOMIC_LOAD_I1;
6616 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6617 opcode = OP_ATOMIC_LOAD_U1;
6618 else if (t->type == MONO_TYPE_I2)
6619 opcode = OP_ATOMIC_LOAD_I2;
6620 else if (t->type == MONO_TYPE_U2)
6621 opcode = OP_ATOMIC_LOAD_U2;
6622 else if (t->type == MONO_TYPE_I4)
6623 opcode = OP_ATOMIC_LOAD_I4;
6624 else if (t->type == MONO_TYPE_U4)
6625 opcode = OP_ATOMIC_LOAD_U4;
6626 else if (t->type == MONO_TYPE_R4)
6627 opcode = OP_ATOMIC_LOAD_R4;
6628 else if (t->type == MONO_TYPE_R8)
6629 opcode = OP_ATOMIC_LOAD_R8;
6630 #if SIZEOF_REGISTER == 8
6631 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6632 opcode = OP_ATOMIC_LOAD_I8;
6633 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6634 opcode = OP_ATOMIC_LOAD_U8;
6636 else if (t->type == MONO_TYPE_I)
6637 opcode = OP_ATOMIC_LOAD_I4;
6638 else if (is_ref || t->type == MONO_TYPE_U)
6639 opcode = OP_ATOMIC_LOAD_U4;
6643 if (!mono_arch_opcode_supported (opcode))
6646 MONO_INST_NEW (cfg, ins, opcode);
6647 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6648 ins->sreg1 = args [0]->dreg;
6649 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6650 MONO_ADD_INS (cfg->cbb, ins);
6653 case MONO_TYPE_BOOLEAN:
6660 ins->type = STACK_I4;
6664 ins->type = STACK_I8;
6668 #if SIZEOF_REGISTER == 8
6669 ins->type = STACK_I8;
6671 ins->type = STACK_I4;
6675 ins->type = cfg->r4_stack_type;
6678 ins->type = STACK_R8;
6682 ins->type = STACK_OBJ;
6688 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6690 MonoType *t = fsig->params [0];
6693 g_assert (t->byref);
6694 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6695 if (t->type == MONO_TYPE_I1)
6696 opcode = OP_ATOMIC_STORE_I1;
6697 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6698 opcode = OP_ATOMIC_STORE_U1;
6699 else if (t->type == MONO_TYPE_I2)
6700 opcode = OP_ATOMIC_STORE_I2;
6701 else if (t->type == MONO_TYPE_U2)
6702 opcode = OP_ATOMIC_STORE_U2;
6703 else if (t->type == MONO_TYPE_I4)
6704 opcode = OP_ATOMIC_STORE_I4;
6705 else if (t->type == MONO_TYPE_U4)
6706 opcode = OP_ATOMIC_STORE_U4;
6707 else if (t->type == MONO_TYPE_R4)
6708 opcode = OP_ATOMIC_STORE_R4;
6709 else if (t->type == MONO_TYPE_R8)
6710 opcode = OP_ATOMIC_STORE_R8;
6711 #if SIZEOF_REGISTER == 8
6712 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6713 opcode = OP_ATOMIC_STORE_I8;
6714 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6715 opcode = OP_ATOMIC_STORE_U8;
6717 else if (t->type == MONO_TYPE_I)
6718 opcode = OP_ATOMIC_STORE_I4;
6719 else if (is_ref || t->type == MONO_TYPE_U)
6720 opcode = OP_ATOMIC_STORE_U4;
6724 if (!mono_arch_opcode_supported (opcode))
6727 MONO_INST_NEW (cfg, ins, opcode);
6728 ins->dreg = args [0]->dreg;
6729 ins->sreg1 = args [1]->dreg;
6730 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6731 MONO_ADD_INS (cfg->cbb, ins);
6733 if (cfg->gen_write_barriers && is_ref)
6734 emit_write_barrier (cfg, args [0], args [1]);
6740 } else if (cmethod->klass->image == mono_defaults.corlib &&
6741 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6742 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6743 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6744 if (should_insert_brekpoint (cfg->method)) {
6745 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6747 MONO_INST_NEW (cfg, ins, OP_NOP);
6748 MONO_ADD_INS (cfg->cbb, ins);
6752 } else if (cmethod->klass->image == mono_defaults.corlib &&
6753 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6754 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6755 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6757 EMIT_NEW_ICONST (cfg, ins, 1);
6759 EMIT_NEW_ICONST (cfg, ins, 0);
6762 } else if (cmethod->klass->image == mono_defaults.corlib &&
6763 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6764 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6765 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6766 /* No stack walks are currently available, so implement this as an intrinsic */
6767 MonoInst *assembly_ins;
6769 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6770 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6773 } else if (cmethod->klass->image == mono_defaults.corlib &&
6774 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6775 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6776 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6777 /* No stack walks are currently available, so implement this as an intrinsic */
6778 MonoInst *method_ins;
6779 MonoMethod *declaring = cfg->method;
6781 /* This returns the declaring generic method */
6782 if (declaring->is_inflated)
6783 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6784 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6785 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6786 cfg->no_inline = TRUE;
6787 if (cfg->method != cfg->current_method)
6788 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6791 } else if (cmethod->klass == mono_defaults.math_class) {
6793 * There is general branchless code for Min/Max, but it does not work for
6795 * http://everything2.com/?node_id=1051618
6797 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6798 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6799 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6800 !strcmp (cmethod->klass->name, "Selector")) ||
6801 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6802 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6803 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6804 !strcmp (cmethod->klass->name, "Selector"))
6806 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6807 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6808 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6811 MonoJumpInfoToken *ji;
6814 if (args [0]->opcode == OP_GOT_ENTRY) {
6815 pi = (MonoInst *)args [0]->inst_p1;
6816 g_assert (pi->opcode == OP_PATCH_INFO);
6817 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6818 ji = (MonoJumpInfoToken *)pi->inst_p0;
6820 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6821 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6824 NULLIFY_INS (args [0]);
6826 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6827 return_val_if_nok (&cfg->error, NULL);
6829 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6830 ins->dreg = mono_alloc_ireg (cfg);
6833 MONO_ADD_INS (cfg->cbb, ins);
6838 #ifdef MONO_ARCH_SIMD_INTRINSICS
6839 if (cfg->opt & MONO_OPT_SIMD) {
6840 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6846 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6850 if (COMPILE_LLVM (cfg)) {
6851 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6856 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6860 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected runtime-internal calls to specialized implementations.
 *   Currently only System.String:InternalAllocateStr is handled: when
 *   allocation profiling is off and MONO_OPT_SHARED is not in effect, the
 *   call is replaced by a direct call to the managed GC allocator.
 *   Returns the replacement call instruction; the fall-through path for
 *   non-redirected calls is outside this excerpt.
 */
6863 inline static MonoInst*
6864 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6865 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6867 if (method->klass == mono_defaults.string_class) {
6868 /* managed string allocation support */
6869 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6870 MonoInst *iargs [2];
6871 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6872 MonoMethod *managed_alloc = NULL;
6874 g_assert (vtable); /* Should not fail since it is System.String */
6875 #ifndef MONO_CROSS_COMPILE
6876 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
6880 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6881 iargs [1] = args [0];
6882 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   During inlining, create an OP_LOCAL variable for each incoming argument
 *   (including an implicit 'this') and emit a store of the corresponding
 *   stack slot into it, so the inlined body sees proper argument vars.
 */
6889 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6891 MonoInst *store, *temp;
6894 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the 'this' slot the static type comes from the stack entry itself. */
6895 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6898 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6899 * would be different than the MonoInst's used to represent arguments, and
6900 * the ldelema implementation can't deal with that.
6901 * Solution: When ldelema is used on an inline argument, create a var for
6902 * it, emit ldelema on that var, and emit the saving code below in
6903 * inline_method () if needed.
6905 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6906 cfg->args [i] = temp;
6907 /* This uses cfg->args [i] which is set by the preceding line */
6908 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6909 store->cil_code = sp [0]->cil_code;
6914 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6915 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6917 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of methods whose full name starts
 *   with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 *   environment variable.  The env var is read once and cached in a static.
 */
6919 check_inline_called_method_name_limit (MonoMethod *called_method)
6922 static const char *limit = NULL;
6924 if (limit == NULL) {
6925 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6927 if (limit_string != NULL)
6928 limit = limit_string;
6933 if (limit [0] != '\0') {
6934 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: the full method name must begin with the limit string. */
6936 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6937 g_free (called_method_name);
6939 //return (strncmp_result <= 0);
6940 return (strncmp_result == 0);
6947 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining into callers whose full name starts
 *   with the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 *   environment variable.  Mirrors check_inline_called_method_name_limit.
 */
6949 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6952 static const char *limit = NULL;
6954 if (limit == NULL) {
6955 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6956 if (limit_string != NULL) {
6957 limit = limit_string;
6963 if (limit [0] != '\0') {
6964 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the cached env-var value. */
6966 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6967 g_free (caller_method_name);
6969 //return (strncmp_result <= 0);
6970 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes register DREG of type RTYPE to its zero value:
 *   NULL for pointers/references, 0 for integers, 0.0 for floats (loaded
 *   from static r4_0/r8_0 constants), and VZERO for value types.
 */
6978 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6980 static double r8_0 = 0.0;
6981 static float r4_0 = 0.0;
6985 rtype = mini_get_underlying_type (rtype);
6989 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6990 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6991 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6992 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6993 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6994 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 stays in single precision; load the static float zero. */
6995 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6996 ins->type = STACK_R4;
6997 ins->inst_p0 = (void*)&r4_0;
6999 MONO_ADD_INS (cfg->cbb, ins);
7000 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7001 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7002 ins->type = STACK_R8;
7003 ins->inst_p0 = (void*)&r8_0;
7005 MONO_ADD_INS (cfg->cbb, ins);
7006 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7007 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7008 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7009 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variables known to be value types are also zeroed as VTs. */
7010 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7012 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* instructions which keep the
 *   IR valid (the vreg has a definition) without generating real
 *   initialization code.  Falls back to a real init for other types.
 */
7017 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7021 rtype = mini_get_underlying_type (rtype);
7025 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7026 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7027 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7028 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7029 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7030 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7031 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7032 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7033 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7034 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7035 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7036 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7037 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7038 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for the remaining cases; emit a real initialization. */
7040 emit_init_rvar (cfg, dreg, rtype);
7044 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE.  Under soft-float a fresh dreg
 *   is initialized and then stored into the local; otherwise the local's
 *   dreg is initialized directly (real init, or dummy when !init).
 */
7046 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7048 MonoInst *var = cfg->locals [local];
7049 if (COMPILE_SOFT_FLOAT (cfg)) {
7051 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7052 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
7053 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7056 emit_init_rvar (cfg, var->dreg, type);
7058 emit_dummy_init_rvar (cfg, var->dreg, type);
7065 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Attempt to inline CMETHOD at the current point in CFG.  Saves the
 *   per-method compilation state (locals, args, bblock maps, generic
 *   context, ...), recursively runs mono_method_to_ir over CMETHOD's body
 *   between fresh start/end bblocks, restores the state, and either links
 *   the inlined bblocks into the graph (success) or discards them (abort).
 *   Returns the cost on success; on abort the caller falls back to a call.
 */
7068 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7069 guchar *ip, guint real_offset, gboolean inline_always)
7072 MonoInst *ins, *rvar = NULL;
7073 MonoMethodHeader *cheader;
7074 MonoBasicBlock *ebblock, *sbblock;
7076 MonoMethod *prev_inlined_method;
7077 MonoInst **prev_locals, **prev_args;
7078 MonoType **prev_arg_types;
7079 guint prev_real_offset;
7080 GHashTable *prev_cbb_hash;
7081 MonoBasicBlock **prev_cil_offset_to_bb;
7082 MonoBasicBlock *prev_cbb;
7083 const unsigned char *prev_ip;
7084 unsigned char *prev_cil_start;
7085 guint32 prev_cil_offset_to_bb_len;
7086 MonoMethod *prev_current_method;
7087 MonoGenericContext *prev_generic_context;
7088 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7090 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filters restricting what gets inlined (debug aid). */
7092 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7093 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7096 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7097 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7102 fsig = mono_method_signature (cmethod);
7104 if (cfg->verbose_level > 2)
7105 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7107 if (!cmethod->inline_info) {
7108 cfg->stat_inlineable_methods++;
7109 cmethod->inline_info = 1;
7112 /* allocate local variables */
7113 cheader = mono_method_get_header_checked (cmethod, &error);
7115 if (inline_always) {
/* Mandatory inlines propagate the header error into the cfg. */
7116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7117 mono_error_move (&cfg->error, &error);
7119 mono_error_cleanup (&error);
7124 /*Must verify before creating locals as it can cause the JIT to assert.*/
7125 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7126 mono_metadata_free_mh (cheader);
7130 /* allocate space to store the return value */
7131 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7132 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7135 prev_locals = cfg->locals;
7136 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7137 for (i = 0; i < cheader->num_locals; ++i)
7138 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7140 /* allocate start and end blocks */
7141 /* This is needed so if the inline is aborted, we can clean up */
7142 NEW_BBLOCK (cfg, sbblock);
7143 sbblock->real_offset = real_offset;
7145 NEW_BBLOCK (cfg, ebblock);
7146 ebblock->block_num = cfg->num_bblocks++;
7147 ebblock->real_offset = real_offset;
/* Save the caller's per-method compilation state before recursing. */
7149 prev_args = cfg->args;
7150 prev_arg_types = cfg->arg_types;
7151 prev_inlined_method = cfg->inlined_method;
7152 cfg->inlined_method = cmethod;
7153 cfg->ret_var_set = FALSE;
7154 cfg->inline_depth ++;
7155 prev_real_offset = cfg->real_offset;
7156 prev_cbb_hash = cfg->cbb_hash;
7157 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7158 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7159 prev_cil_start = cfg->cil_start;
7161 prev_cbb = cfg->cbb;
7162 prev_current_method = cfg->current_method;
7163 prev_generic_context = cfg->generic_context;
7164 prev_ret_var_set = cfg->ret_var_set;
7165 prev_disable_inline = cfg->disable_inline;
7167 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL into IR between sbblock/ebblock. */
7170 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7172 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of success. */
7174 cfg->inlined_method = prev_inlined_method;
7175 cfg->real_offset = prev_real_offset;
7176 cfg->cbb_hash = prev_cbb_hash;
7177 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7178 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7179 cfg->cil_start = prev_cil_start;
7181 cfg->locals = prev_locals;
7182 cfg->args = prev_args;
7183 cfg->arg_types = prev_arg_types;
7184 cfg->current_method = prev_current_method;
7185 cfg->generic_context = prev_generic_context;
7186 cfg->ret_var_set = prev_ret_var_set;
7187 cfg->disable_inline = prev_disable_inline;
7188 cfg->inline_depth --;
/* Accept the inline when the cost is small, forced, or AggressiveInlining. */
7190 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7191 if (cfg->verbose_level > 2)
7192 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7194 cfg->stat_inlined_methods++;
7196 /* always add some code to avoid block split failures */
7197 MONO_INST_NEW (cfg, ins, OP_NOP);
7198 MONO_ADD_INS (prev_cbb, ins);
7200 prev_cbb->next_bb = sbblock;
7201 link_bblock (cfg, prev_cbb, sbblock);
7204 * Get rid of the begin and end bblocks if possible to aid local
7207 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7209 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7210 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7212 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7213 MonoBasicBlock *prev = ebblock->in_bb [0];
7215 if (prev->next_bb == ebblock) {
7216 mono_merge_basic_blocks (cfg, prev, ebblock);
7218 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7219 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7220 cfg->cbb = prev_cbb;
7223 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7228 * It's possible that the rvar is set in some prev bblock, but not in others.
7234 for (i = 0; i < ebblock->in_count; ++i) {
7235 bb = ebblock->in_bb [i];
7237 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7240 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7250 * If the inlined method contains only a throw, then the ret var is not
7251 * set, so set it to a dummy value.
7254 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7256 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7259 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7262 if (cfg->verbose_level > 2)
7263 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7264 cfg->exception_type = MONO_EXCEPTION_NONE;
7266 /* This gets rid of the newly added bblocks */
7267 cfg->cbb = prev_cbb;
7269 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7274 * Some of these comments may well be out-of-date.
7275 * Design decisions: we do a single pass over the IL code (and we do bblock
7276 * splitting/merging in the few cases when it's required: a back jump to an IL
7277 * address that was not already seen as bblock starting point).
7278 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7279 * Complex operations are decomposed in simpler ones right away. We need to let the
7280 * arch-specific code peek and poke inside this process somehow (except when the
7281 * optimizations can take advantage of the full semantic info of coarse opcodes).
7282 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7283 * MonoInst->opcode initially is the IL opcode or some simplification of that
7284 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7285 * opcode with value bigger than OP_LAST.
7286 * At this point the IR can be handed over to an interpreter, a dumb code generator
7287 * or to the optimizing code generator that will translate it to SSA form.
7289 * Profiling directed optimizations.
7290 * We may compile by default with few or no optimizations and instrument the code
7291 * or the user may indicate what methods to optimize the most either in a config file
7292 * or through repeated runs where the compiler applies offline the optimizations to
7293 * each method and then decides if it was worth it.
7296 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7297 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7298 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7299 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7300 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7301 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7302 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7303 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7305 /* offset from br.s -> br like opcodes */
7306 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when IP belongs to bblock BB, i.e. no other bblock starts
 *   at that IL offset (NULL in the offset->bb map means "no bb starts here").
 */
7309 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7311 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7313 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the IL stream [start, end) and create basic blocks at every branch
 *   target and fall-through point (via GET_BBLOCK).  Also marks the bblock
 *   containing a CEE_THROW as out-of-line so it can be moved off the hot path.
 */
7317 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7319 unsigned char *ip = start;
7320 unsigned char *target;
7323 MonoBasicBlock *bblock;
7324 const MonoOpcode *opcode;
7327 cli_addr = ip - start;
7328 i = mono_opcode_value ((const guint8 **)&ip, end);
7331 opcode = &mono_opcodes [i];
/* Dispatch on the opcode's operand kind to find its size and any targets. */
7332 switch (opcode->argument) {
7333 case MonoInlineNone:
7336 case MonoInlineString:
7337 case MonoInlineType:
7338 case MonoInlineField:
7339 case MonoInlineMethod:
7342 case MonoShortInlineR:
7349 case MonoShortInlineVar:
7350 case MonoShortInlineI:
7353 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement after a 2-byte instruction. */
7354 target = start + cli_addr + 2 + (signed char)ip [1];
7355 GET_BBLOCK (cfg, bblock, target);
7358 GET_BBLOCK (cfg, bblock, ip);
7360 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement after a 5-byte instruction. */
7361 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7362 GET_BBLOCK (cfg, bblock, target);
7365 GET_BBLOCK (cfg, bblock, ip);
7367 case MonoInlineSwitch: {
7368 guint32 n = read32 (ip + 1);
/* Displacements are relative to the end of the whole switch instruction. */
7371 cli_addr += 5 + 4 * n;
7372 target = start + cli_addr;
7373 GET_BBLOCK (cfg, bblock, target);
7375 for (j = 0; j < n; ++j) {
7376 target = start + cli_addr + (gint32)read32 (ip);
7377 GET_BBLOCK (cfg, bblock, target);
7387 g_assert_not_reached ();
7390 if (i == CEE_THROW) {
7391 unsigned char *bb_start = ip - 1;
7393 /* Find the start of the bblock containing the throw */
7395 while ((bb_start >= start) && !bblock) {
7396 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7400 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 *   constructed types.  For wrapper methods the token indexes the wrapper's
 *   data; the result is then inflated with CONTEXT if one is given.
 */
7410 static inline MonoMethod *
7411 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7415 mono_error_init (error);
7417 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7418 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7420 method = mono_class_inflate_generic_method_checked (method, context, error);
7423 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but rejects methods on open constructed
 *   types when not compiling gshared code (recorded as a bad-image error on
 *   the cfg).  CFG may be NULL, in which case errors are swallowed (FIXME).
 */
7429 static inline MonoMethod *
7430 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7433 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7435 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7436 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7440 if (!method && !cfg)
7441 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 *   CONTEXT.  Wrapper methods resolve the token from their wrapper data.
 *   The class is initialized before being returned.
 */
7446 static inline MonoClass*
7447 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7452 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7453 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7455 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7456 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7459 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7460 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7463 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a standalone MonoMethodSignature (wrapper data for
 *   wrappers, metadata parse otherwise), then inflate it with CONTEXT.
 *   Returns NULL on error (set in ERROR).
 */
7467 static inline MonoMethodSignature*
7468 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7470 MonoMethodSignature *fsig;
7472 mono_error_init (error);
7473 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7474 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7476 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7477 return_val_if_nok (error, NULL);
7480 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *   Return (lazily resolving and caching in a static) the
 *   SecurityManager::ThrowException(exception) helper method.
 */
7486 throw_exception (void)
7488 static MonoMethod *method = NULL;
7491 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7492 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager::ThrowException with the preallocated
 *   exception object EX as its single argument.
 */
7499 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7501 MonoMethod *thrower = throw_exception ();
7504 EMIT_NEW_PCONST (cfg, args [0], ex);
7505 mono_emit_method_call (cfg, thrower, args, NULL);
7509  * Return the original method if a wrapper is specified. We can only access
7510  * the custom attributes from the original method.
7513 get_original_method (MonoMethod *method)
7515 if (method->wrapper_type == MONO_WRAPPER_NONE)
7518 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7519 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7522 /* in other cases we need to find the original method */
7523 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the corresponding security exception at runtime.
 */
7527 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7529 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7530 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7532 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the corresponding security exception at runtime.
 */
7536 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7538 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7539 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7541 emit_throw_exception (cfg, ex);
7545  * Check that the IL instructions at ip are the array initialization
7546  * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical dup/ldtoken/call-InitializeArray IL
 *   sequence following a newarr and, when it matches, return a pointer to
 *   the static field data (or its RVA when AOT compiling) so the array can
 *   be initialized directly.  Also outputs the element size and the field
 *   token.  Only little-endian primitive element types are handled; the
 *   big-endian path punts (data would need byte swapping).
 */
7549 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7552  * newarr[System.Int32]
7554  * ldtoken field valuetype ...
7555  * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token table (a Field token). */
7557 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7559 guint32 token = read32 (ip + 7);
7560 guint32 field_token = read32 (ip + 2);
7561 guint32 field_index = field_token & 0xffffff;
7563 const char *data_ptr;
7565 MonoMethod *cmethod;
7566 MonoClass *dummy_class;
7567 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7571 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7575 *out_field_token = field_token;
7577 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must really be corlib's RuntimeHelpers::InitializeArray. */
7580 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7582 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7583 case MONO_TYPE_BOOLEAN:
7587 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7588 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7589 case MONO_TYPE_CHAR:
/* The field's declared size must cover len elements of the computed size. */
7606 if (size > mono_type_size (field->type, &dummy_align))
7609 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7610 if (!image_is_dynamic (method->klass->image)) {
7611 field_index = read32 (ip + 2) & 0xffffff;
7612 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7613 data_ptr = mono_image_rva_map (method->klass->image, rva);
7614 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7615 /* for aot code we do the lookup on load */
7616 if (aot && data_ptr)
7617 return (const char *)GUINT_TO_POINTER (rva);
7619 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7621 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 *   IP, including the method's full name and either the offending
 *   disassembled instruction, a header-parse error, or "body is empty".
 */
7629 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7632 char *method_fname = mono_method_full_name (method, TRUE);
7634 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7637 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7638 mono_error_cleanup (&error);
7639 } else if (header->code_size == 0)
7640 method_code = g_strdup ("method body is empty.");
7642 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7643 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7644 g_free (method_fname);
7645 g_free (method_code);
/* The header is freed later along with the rest of the cfg's headers. */
7646 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the value on top of the stack (*sp) into local N.
 * When the previous instruction is exactly sp [0] and is an integer
 * constant, the register-register move is elided by retargeting that
 * instruction's destination register to the local's register.
 * NOTE(review): fragmented view; some original lines are missing below.
 */
7650 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7653 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7654 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7655 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7656 /* Optimize reg-reg moves away */
7658 * Can't optimize other opcodes, since sp[0] might point to
7659 * the last ins of a decomposed opcode.
7661 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* Fallback: emit an explicit local store */
7663 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize common IL sequences following an LDLOCA and replace them so
 * the address-taken local disappears. The visible case handles
 * "ldloca N; initobj T" by emitting a direct initialization of local N.
 * Returns an updated ip past the consumed instructions (presumably; the
 * return statements are not visible in this fragmented view — confirm
 * against the full source).
 */
7668 * ldloca inhibits many optimizations so try to get rid of it in common
7671 static inline unsigned char *
7672 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Local index is the 16-bit operand of the (fat) ldloca */
7682 local = read16 (ip + 2);
/* Match a following "initobj <token>" inside the same basic block */
7686 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7687 /* From the INITOBJ case */
7688 token = read32 (ip + 2);
7689 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7690 CHECK_TYPELOAD (klass);
7691 type = mini_get_underlying_type (&klass->byval_arg);
/* Initialize the local directly instead of through its address */
7692 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call in llvm-only mode, where vtable and IMT
 * slots contain function descriptors (an <address, argument> pair) instead
 * of method addresses/trampolines. Four strategies are visible below:
 *   1. plain virtual call: load descriptor from the vtable slot, lazily
 *      initializing it via an icall when the slot is still empty;
 *   2. simple (non-variant, non-generic) interface call: call through the
 *      IMT thunk descriptor stored in the negative vtable area;
 *   3. generic-virtual / variant-interface call: like 2, but the slot may
 *      be uninitialized or the thunk may return NULL, so a slow path
 *      resolves the call via a runtime icall;
 *   4. non-optimized fallback (gsharedvt): resolve address + extra arg
 *      through a runtime icall and do an indirect call.
 * NOTE(review): this view is fragmented; variable declarations, braces and
 * some statements of the original body are missing.
 */
7700 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7702 MonoInst *icall_args [16];
7703 MonoInst *call_target, *ins, *vtable_ins;
7704 int arg_reg, this_reg, vtable_reg;
7705 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7706 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7707 gboolean variant_iface = FALSE;
7712 * In llvm-only mode, vtables contain function descriptors instead of
7713 * method addresses/trampolines.
/* 'this' (sp [0]) must be non-null before any vtable access */
7715 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use the vtable index */
7718 slot = mono_method_get_imt_slot (cmethod);
7720 slot = mono_method_get_vtable_index (cmethod);
7722 this_reg = sp [0]->dreg;
7724 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7725 variant_iface = TRUE;
/* Case 1: normal virtual call through a vtable function descriptor */
7727 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7729 * The simplest case, a normal virtual call.
7731 int slot_reg = alloc_preg (cfg);
7732 int addr_reg = alloc_preg (cfg);
7733 int arg_reg = alloc_preg (cfg);
7734 MonoBasicBlock *non_null_bb;
7736 vtable_reg = alloc_preg (cfg);
7737 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7738 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7740 /* Load the vtable slot, which contains a function descriptor. */
7741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7743 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: the slot is usually already initialized (non-zero) */
7745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7746 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7747 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7750 // FIXME: Make the wrapper use the preserveall cconv
7751 // FIXME: Use one icall per slot for small slot numbers ?
/* Slow path: lazily initialize the slot via an icall */
7752 icall_args [0] = vtable_ins;
7753 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7754 /* Make the icall return the vtable slot value to save some code space */
7755 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7756 ins->dreg = slot_reg;
7757 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7760 MONO_START_BB (cfg, non_null_bb);
7761 /* Load the address + arg from the vtable slot */
7762 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7765 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: plain interface call through the IMT thunk descriptor */
7768 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7770 * A simple interface call
7772 * We make a call through an imt slot to obtain the function descriptor we need to call.
7773 * The imt slot contains a function descriptor for a runtime function + arg.
7775 int slot_reg = alloc_preg (cfg);
7776 int addr_reg = alloc_preg (cfg);
7777 int arg_reg = alloc_preg (cfg);
7778 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7780 vtable_reg = alloc_preg (cfg);
7781 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets below the vtable */
7782 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7785 * The slot is already initialized when the vtable is created so there is no need
7789 /* Load the imt slot, which contains a function descriptor. */
7790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7792 /* Load the address + arg of the imt thunk from the imt slot */
7793 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7794 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7796 * IMT thunks in llvm-only mode are C functions which take an info argument
7797 * plus the imt method and return the ftndesc to call.
7799 icall_args [0] = thunk_arg_ins;
7800 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7801 cmethod, MONO_RGCTX_INFO_METHOD);
7802 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7804 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual or variant interface call, with slow path */
7807 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7809 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7810 * dynamically extended as more instantiations are discovered.
7811 * This handles generic virtual methods both on classes and interfaces.
7813 int slot_reg = alloc_preg (cfg);
7814 int addr_reg = alloc_preg (cfg);
7815 int arg_reg = alloc_preg (cfg);
7816 int ftndesc_reg = alloc_preg (cfg);
7817 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7818 MonoBasicBlock *slowpath_bb, *end_bb;
7820 NEW_BBLOCK (cfg, slowpath_bb);
7821 NEW_BBLOCK (cfg, end_bb);
7823 vtable_reg = alloc_preg (cfg);
7824 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface methods use the IMT area, class methods the vtable proper */
7826 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7828 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7830 /* Load the slot, which contains a function descriptor. */
7831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7833 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7834 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7839 /* Same as with iface calls */
7840 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7841 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7842 icall_args [0] = thunk_arg_ins;
7843 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7844 cmethod, MONO_RGCTX_INFO_METHOD);
7845 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7846 ftndesc_ins->dreg = ftndesc_reg;
7848 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7849 * they don't know about yet. Fall back to the slowpath in that case.
7851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7852 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7854 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target descriptor through a runtime icall */
7857 MONO_START_BB (cfg, slowpath_bb);
7858 icall_args [0] = vtable_ins;
7859 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7860 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7861 cmethod, MONO_RGCTX_INFO_METHOD);
7863 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7865 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7866 ftndesc_ins->dreg = ftndesc_reg;
7867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7870 MONO_START_BB (cfg, end_bb);
7871 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: gsharedvt fallback — resolve address + extra arg at runtime */
7875 * Non-optimized cases
7877 icall_args [0] = sp [0];
7878 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7880 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7881 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter slot for the extra argument returned by the resolver */
7883 arg_reg = alloc_preg (cfg);
7884 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7885 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7887 g_assert (is_gsharedvt);
7889 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7891 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7894 * Pass the extra argument even if the callee doesn't receive it, most
7895 * calling conventions allow this.
7897 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Check whether KLASS is System.Exception or derives from it by walking
 * the parent chain. NOTE(review): fragmented view — the loop construct and
 * return statements of the original are not visible here.
 */
7901 is_exception_class (MonoClass *klass)
7904 if (klass == mono_defaults.exception_class)
7906 klass = klass->parent;
7912 * is_jit_optimizer_disabled:
7914 * Determine whenever M's assembly has a DebuggableAttribute with the
7915 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached on the MonoAssembly (jit_optimizer_disabled /
 * jit_optimizer_disabled_inited); a memory barrier publishes the value
 * before the inited flag so concurrent readers see a consistent pair.
 * NOTE(review): fragmented view — some lines of the original body
 * (early returns, the attribute-decoding statements) are missing below.
 */
7918 is_jit_optimizer_disabled (MonoMethod *m)
7921 MonoAssembly *ass = m->klass->image->assembly;
7922 MonoCustomAttrInfo* attrs;
7925 gboolean val = FALSE;
/* Fast path: already computed for this assembly */
7928 if (ass->jit_optimizer_disabled_inited)
7929 return ass->jit_optimizer_disabled;
7931 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type unavailable: record FALSE and bail out */
7935 ass->jit_optimizer_disabled = FALSE;
7936 mono_memory_barrier ();
7937 ass->jit_optimizer_disabled_inited = TRUE;
7941 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7942 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7944 for (i = 0; i < attrs->num_attrs; ++i) {
7945 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7947 MonoMethodSignature *sig;
/* Only DebuggableAttribute instances are of interest */
7949 if (!attr->ctor || attr->ctor->klass != klass)
7951 /* Decode the attribute. See reflection.c */
7952 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7953 g_assert (read16 (p) == 0x0001);
7956 // FIXME: Support named parameters
7957 sig = mono_method_signature (attr->ctor);
7958 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7960 /* Two boolean arguments */
7964 mono_custom_attrs_free (attrs);
/* Publish result, then the inited flag (barrier orders the two stores) */
7967 ass->jit_optimizer_disabled = val;
7968 mono_memory_barrier ();
7969 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, made
 * with CALL_OPCODE) may be compiled as a tail call. Starts from the
 * architecture's answer and then vetoes the transformation for cases where
 * the callee could observe the caller's stack or where the runtime needs
 * the caller frame (LMF, wrappers, pinvoke, non-CEE_CALL opcodes).
 * NOTE(review): fragmented view — some original lines are missing below.
 */
7975 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7977 gboolean supported_tail_call;
7980 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7982 for (i = 0; i < fsig->param_count; ++i) {
7983 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7984 /* These can point to the current method's stack */
7985 supported_tail_call = FALSE;
7987 if (fsig->hasthis && cmethod->klass->valuetype)
7988 /* this might point to the current method's stack */
7989 supported_tail_call = FALSE;
7990 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7991 supported_tail_call = FALSE;
/* The caller's LMF frame must stay live for the duration of the call */
7992 if (cfg->method->save_lmf)
7993 supported_tail_call = FALSE;
7994 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7995 supported_tail_call = FALSE;
7996 if (call_opcode != CEE_CALL)
7997 supported_tail_call = FALSE;
7999 /* Debugging support */
8001 if (supported_tail_call) {
8002 if (!mono_debug_count ())
8003 supported_tail_call = FALSE;
8007 return supported_tail_call;
8013 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: SP holds the
 * already-allocated 'this' plus the ctor arguments, IP points at the
 * NEWOBJ opcode, and *inline_costs is updated if the ctor is inlined.
 * Strategies visible below: intrinsic ctor, inlining, gsharedvt indirect
 * call, context-dependent indirect call, and a plain direct call.
 * NOTE(review): fragmented view — braces and some statements of the
 * original body are missing.
 */
8016 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8017 MonoInst **sp, guint8 *ip, int *inline_costs)
8019 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need a vtable/mrgctx argument */
8021 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8022 mono_method_is_generic_sharable (cmethod, TRUE)) {
8023 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8024 mono_class_vtable (cfg->domain, cmethod->klass);
8025 CHECK_TYPELOAD (cmethod->klass);
8027 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8028 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8031 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8032 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8034 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8036 CHECK_TYPELOAD (cmethod->klass);
8037 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8042 /* Avoid virtual calls to ctors if possible */
8043 if (mono_class_is_marshalbyref (cmethod->klass))
8044 callvirt_this_arg = sp [0];
/* First choice: a ctor intrinsic (must return void) */
8046 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8047 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8048 CHECK_CFG_EXCEPTION;
/* Second choice: inline the ctor body */
8049 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8050 mono_method_check_inlining (cfg, cmethod) &&
8051 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8054 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8055 cfg->real_offset += 5;
8057 *inline_costs += costs - 5;
8059 INLINE_FAILURE ("inline failure");
8060 // FIXME-VT: Clean this up
8061 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8062 GSHAREDVT_FAILURE(*ip);
8063 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call through an out trampoline */
8065 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8068 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8070 if (cfg->llvm_only) {
8071 // FIXME: Avoid initializing vtable_arg
8072 emit_llvmonly_calli (cfg, fsig, sp, addr);
8074 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8076 } else if (context_used &&
8077 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8078 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8079 MonoInst *cmethod_addr;
8081 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8083 if (cfg->llvm_only) {
8084 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8085 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8086 emit_llvmonly_calli (cfg, fsig, sp, addr);
8088 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8089 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8091 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a plain direct call to the ctor */
8094 INLINE_FAILURE ("ctor call");
8095 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8096 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that sets the method's return value to VAL. Valuetype
 * returns that require CEE_STOBJ go either into the compile-time return
 * variable or through the hidden vret address; with the soft-float
 * fallback an R4 return is first converted via an icall; otherwise the
 * architecture backend places VAL in the return register(s).
 * NOTE(review): fragmented view — some original lines are missing below.
 */
8103 emit_setret (MonoCompile *cfg, MonoInst *val)
8105 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype returns too large for a register: store through vret */
8108 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8111 if (!cfg->vret_addr) {
8112 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
8114 EMIT_NEW_RETLOADA (cfg, ret_addr);
8116 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8117 ins->klass = mono_class_from_mono_type (ret_type);
8120 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 return through an icall first */
8121 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8122 MonoInst *iargs [1];
8126 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8127 mono_arch_emit_setret (cfg, cfg->method, conv);
8129 mono_arch_emit_setret (cfg, cfg->method, val);
8132 mono_arch_emit_setret (cfg, cfg->method, val);
8138 * mono_method_to_ir:
8140 * Translate the .net IL into linear IR.
8143 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8144 MonoInst *return_var, MonoInst **inline_args,
8145 guint inline_offset, gboolean is_virtual_call)
8148 MonoInst *ins, **sp, **stack_start;
8149 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8150 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8151 MonoMethod *cmethod, *method_definition;
8152 MonoInst **arg_array;
8153 MonoMethodHeader *header;
8155 guint32 token, ins_flag;
8157 MonoClass *constrained_class = NULL;
8158 unsigned char *ip, *end, *target, *err_pos;
8159 MonoMethodSignature *sig;
8160 MonoGenericContext *generic_context = NULL;
8161 MonoGenericContainer *generic_container = NULL;
8162 MonoType **param_types;
8163 int i, n, start_new_bblock, dreg;
8164 int num_calls = 0, inline_costs = 0;
8165 int breakpoint_id = 0;
8167 GSList *class_inits = NULL;
8168 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8170 gboolean init_locals, seq_points, skip_dead_blocks;
8171 gboolean sym_seq_points = FALSE;
8172 MonoDebugMethodInfo *minfo;
8173 MonoBitSet *seq_point_locs = NULL;
8174 MonoBitSet *seq_point_set_locs = NULL;
8176 cfg->disable_inline = is_jit_optimizer_disabled (method);
8178 /* serialization and xdomain stuff may need access to private fields and methods */
8179 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8180 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8181 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8182 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8183 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8184 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8186 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8187 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8188 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8189 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8190 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8192 image = method->klass->image;
8193 header = mono_method_get_header_checked (method, &cfg->error);
8195 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8196 goto exception_exit;
8198 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
8201 generic_container = mono_method_get_generic_container (method);
8202 sig = mono_method_signature (method);
8203 num_args = sig->hasthis + sig->param_count;
8204 ip = (unsigned char*)header->code;
8205 cfg->cil_start = ip;
8206 end = ip + header->code_size;
8207 cfg->stat_cil_code_size += header->code_size;
8209 seq_points = cfg->gen_seq_points && cfg->method == method;
8211 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8212 /* We could hit a seq point before attaching to the JIT (#8338) */
8216 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8217 minfo = mono_debug_lookup_method (method);
8219 MonoSymSeqPoint *sps;
8220 int i, n_il_offsets;
8222 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8223 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8224 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8225 sym_seq_points = TRUE;
8226 for (i = 0; i < n_il_offsets; ++i) {
8227 if (sps [i].il_offset < header->code_size)
8228 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8231 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8232 /* Methods without line number info like auto-generated property accessors */
8233 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8234 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8235 sym_seq_points = TRUE;
8240 * Methods without init_locals set could cause asserts in various passes
8241 * (#497220). To work around this, we emit dummy initialization opcodes
8242 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8243 * on some platforms.
8245 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8246 init_locals = header->init_locals;
8250 method_definition = method;
8251 while (method_definition->is_inflated) {
8252 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8253 method_definition = imethod->declaring;
8256 /* SkipVerification is not allowed if core-clr is enabled */
8257 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8259 dont_verify_stloc = TRUE;
8262 if (sig->is_inflated)
8263 generic_context = mono_method_get_context (method);
8264 else if (generic_container)
8265 generic_context = &generic_container->context;
8266 cfg->generic_context = generic_context;
8269 g_assert (!sig->has_type_parameters);
8271 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8272 g_assert (method->is_inflated);
8273 g_assert (mono_method_get_context (method)->method_inst);
8275 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8276 g_assert (sig->generic_param_count);
8278 if (cfg->method == method) {
8279 cfg->real_offset = 0;
8281 cfg->real_offset = inline_offset;
8284 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8285 cfg->cil_offset_to_bb_len = header->code_size;
8287 cfg->current_method = method;
8289 if (cfg->verbose_level > 2)
8290 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8292 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8294 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8295 for (n = 0; n < sig->param_count; ++n)
8296 param_types [n + sig->hasthis] = sig->params [n];
8297 cfg->arg_types = param_types;
8299 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8300 if (cfg->method == method) {
8302 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8303 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8306 NEW_BBLOCK (cfg, start_bblock);
8307 cfg->bb_entry = start_bblock;
8308 start_bblock->cil_code = NULL;
8309 start_bblock->cil_length = 0;
8312 NEW_BBLOCK (cfg, end_bblock);
8313 cfg->bb_exit = end_bblock;
8314 end_bblock->cil_code = NULL;
8315 end_bblock->cil_length = 0;
8316 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8317 g_assert (cfg->num_bblocks == 2);
8319 arg_array = cfg->args;
8321 if (header->num_clauses) {
8322 cfg->spvars = g_hash_table_new (NULL, NULL);
8323 cfg->exvars = g_hash_table_new (NULL, NULL);
8325 /* handle exception clauses */
8326 for (i = 0; i < header->num_clauses; ++i) {
8327 MonoBasicBlock *try_bb;
8328 MonoExceptionClause *clause = &header->clauses [i];
8329 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8331 try_bb->real_offset = clause->try_offset;
8332 try_bb->try_start = TRUE;
8333 try_bb->region = ((i + 1) << 8) | clause->flags;
8334 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8335 tblock->real_offset = clause->handler_offset;
8336 tblock->flags |= BB_EXCEPTION_HANDLER;
8339 * Linking the try block with the EH block hinders inlining as we won't be able to
8340 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8342 if (COMPILE_LLVM (cfg))
8343 link_bblock (cfg, try_bb, tblock);
8345 if (*(ip + clause->handler_offset) == CEE_POP)
8346 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8348 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8349 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8350 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8351 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8352 MONO_ADD_INS (tblock, ins);
8354 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8355 /* finally clauses already have a seq point */
8356 /* seq points for filter clauses are emitted below */
8357 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8358 MONO_ADD_INS (tblock, ins);
8361 /* todo: is a fault block unsafe to optimize? */
8362 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8363 tblock->flags |= BB_EXCEPTION_UNSAFE;
8366 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8368 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8370 /* catch and filter blocks get the exception object on the stack */
8371 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8372 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8374 /* mostly like handle_stack_args (), but just sets the input args */
8375 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8376 tblock->in_scount = 1;
8377 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8378 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8382 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8383 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8384 if (!cfg->compile_llvm) {
8385 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8386 ins->dreg = tblock->in_stack [0]->dreg;
8387 MONO_ADD_INS (tblock, ins);
8390 MonoInst *dummy_use;
8393 * Add a dummy use for the exvar so its liveness info will be
8396 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8399 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8400 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8401 MONO_ADD_INS (tblock, ins);
8404 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8405 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8406 tblock->flags |= BB_EXCEPTION_HANDLER;
8407 tblock->real_offset = clause->data.filter_offset;
8408 tblock->in_scount = 1;
8409 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8410 /* The filter block shares the exvar with the handler block */
8411 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8412 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8413 MONO_ADD_INS (tblock, ins);
8417 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8418 clause->data.catch_class &&
8420 mono_class_check_context_used (clause->data.catch_class)) {
8422 * In shared generic code with catch
8423 * clauses containing type variables
8424 * the exception handling code has to
8425 * be able to get to the rgctx.
8426 * Therefore we have to make sure that
8427 * the vtable/mrgctx argument (for
8428 * static or generic methods) or the
8429 * "this" argument (for non-static
8430 * methods) are live.
8432 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8433 mini_method_get_context (method)->method_inst ||
8434 method->klass->valuetype) {
8435 mono_get_vtable_var (cfg);
8437 MonoInst *dummy_use;
8439 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8444 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8445 cfg->cbb = start_bblock;
8446 cfg->args = arg_array;
8447 mono_save_args (cfg, sig, inline_args);
8450 /* FIRST CODE BLOCK */
8451 NEW_BBLOCK (cfg, tblock);
8452 tblock->cil_code = ip;
8456 ADD_BBLOCK (cfg, tblock);
8458 if (cfg->method == method) {
8459 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8460 if (breakpoint_id) {
8461 MONO_INST_NEW (cfg, ins, OP_BREAK);
8462 MONO_ADD_INS (cfg->cbb, ins);
8466 /* we use a separate basic block for the initialization code */
8467 NEW_BBLOCK (cfg, init_localsbb);
8468 cfg->bb_init = init_localsbb;
8469 init_localsbb->real_offset = cfg->real_offset;
8470 start_bblock->next_bb = init_localsbb;
8471 init_localsbb->next_bb = cfg->cbb;
8472 link_bblock (cfg, start_bblock, init_localsbb);
8473 link_bblock (cfg, init_localsbb, cfg->cbb);
8475 cfg->cbb = init_localsbb;
8477 if (cfg->gsharedvt && cfg->method == method) {
8478 MonoGSharedVtMethodInfo *info;
8479 MonoInst *var, *locals_var;
8482 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8483 info->method = cfg->method;
8484 info->count_entries = 16;
8485 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8486 cfg->gsharedvt_info = info;
8488 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8489 /* prevent it from being register allocated */
8490 //var->flags |= MONO_INST_VOLATILE;
8491 cfg->gsharedvt_info_var = var;
8493 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8494 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8496 /* Allocate locals */
8497 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8498 /* prevent it from being register allocated */
8499 //locals_var->flags |= MONO_INST_VOLATILE;
8500 cfg->gsharedvt_locals_var = locals_var;
8502 dreg = alloc_ireg (cfg);
8503 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8505 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8506 ins->dreg = locals_var->dreg;
8508 MONO_ADD_INS (cfg->cbb, ins);
8509 cfg->gsharedvt_locals_var_ins = ins;
8511 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8514 ins->flags |= MONO_INST_INIT;
8518 if (mono_security_core_clr_enabled ()) {
8519 /* check if this is native code, e.g. an icall or a p/invoke */
8520 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8521 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8523 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8524 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8526 /* if this ia a native call then it can only be JITted from platform code */
8527 if ((icall || pinvk) && method->klass && method->klass->image) {
8528 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8529 MonoException *ex = icall ? mono_get_exception_security () :
8530 mono_get_exception_method_access ();
8531 emit_throw_exception (cfg, ex);
8538 CHECK_CFG_EXCEPTION;
8540 if (header->code_size == 0)
8543 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8548 if (cfg->method == method)
8549 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8551 for (n = 0; n < header->num_locals; ++n) {
8552 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8557 /* We force the vtable variable here for all shared methods
8558 for the possibility that they might show up in a stack
8559 trace where their exact instantiation is needed. */
8560 if (cfg->gshared && method == cfg->method) {
8561 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8562 mini_method_get_context (method)->method_inst ||
8563 method->klass->valuetype) {
8564 mono_get_vtable_var (cfg);
8566 /* FIXME: Is there a better way to do this?
8567 We need the variable live for the duration
8568 of the whole method. */
8569 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8573 /* add a check for this != NULL to inlined methods */
8574 if (is_virtual_call) {
8577 NEW_ARGLOAD (cfg, arg_ins, 0);
8578 MONO_ADD_INS (cfg->cbb, arg_ins);
8579 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8582 skip_dead_blocks = !dont_verify;
8583 if (skip_dead_blocks) {
8584 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8589 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8590 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8593 start_new_bblock = 0;
8595 if (cfg->method == method)
8596 cfg->real_offset = ip - header->code;
8598 cfg->real_offset = inline_offset;
8603 if (start_new_bblock) {
8604 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8605 if (start_new_bblock == 2) {
8606 g_assert (ip == tblock->cil_code);
8608 GET_BBLOCK (cfg, tblock, ip);
8610 cfg->cbb->next_bb = tblock;
8612 start_new_bblock = 0;
8613 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8614 if (cfg->verbose_level > 3)
8615 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8616 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8620 g_slist_free (class_inits);
8623 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8624 link_bblock (cfg, cfg->cbb, tblock);
8625 if (sp != stack_start) {
8626 handle_stack_args (cfg, stack_start, sp - stack_start);
8628 CHECK_UNVERIFIABLE (cfg);
8630 cfg->cbb->next_bb = tblock;
8632 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8633 if (cfg->verbose_level > 3)
8634 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8635 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8638 g_slist_free (class_inits);
8643 if (skip_dead_blocks) {
8644 int ip_offset = ip - header->code;
8646 if (ip_offset == bb->end)
8650 int op_size = mono_opcode_size (ip, end);
8651 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8653 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8655 if (ip_offset + op_size == bb->end) {
8656 MONO_INST_NEW (cfg, ins, OP_NOP);
8657 MONO_ADD_INS (cfg->cbb, ins);
8658 start_new_bblock = 1;
8666 * Sequence points are points where the debugger can place a breakpoint.
8667 * Currently, we generate these automatically at points where the IL
8670 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8672 * Make methods interruptable at the beginning, and at the targets of
8673 * backward branches.
8674 * Also, do this at the start of every bblock in methods with clauses too,
8675 * to be able to handle instructions with imprecise control flow like
8677 * Backward branches are handled at the end of method-to-ir ().
8679 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8680 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8682 /* Avoid sequence points on empty IL like .volatile */
8683 // FIXME: Enable this
8684 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8685 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8686 if ((sp != stack_start) && !sym_seq_point)
8687 ins->flags |= MONO_INST_NONEMPTY_STACK;
8688 MONO_ADD_INS (cfg->cbb, ins);
8691 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8694 cfg->cbb->real_offset = cfg->real_offset;
8696 if ((cfg->method == method) && cfg->coverage_info) {
8697 guint32 cil_offset = ip - header->code;
8698 cfg->coverage_info->data [cil_offset].cil_code = ip;
8700 /* TODO: Use an increment here */
8701 #if defined(TARGET_X86)
8702 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8703 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8705 MONO_ADD_INS (cfg->cbb, ins);
8707 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8712 if (cfg->verbose_level > 3)
8713 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8717 if (seq_points && !sym_seq_points && sp != stack_start) {
8719 * The C# compiler uses these nops to notify the JIT that it should
8720 * insert seq points.
8722 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8723 MONO_ADD_INS (cfg->cbb, ins);
8725 if (cfg->keep_cil_nops)
8726 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8728 MONO_INST_NEW (cfg, ins, OP_NOP);
8730 MONO_ADD_INS (cfg->cbb, ins);
8733 if (should_insert_brekpoint (cfg->method)) {
8734 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8736 MONO_INST_NEW (cfg, ins, OP_NOP);
8739 MONO_ADD_INS (cfg->cbb, ins);
8745 CHECK_STACK_OVF (1);
8746 n = (*ip)-CEE_LDARG_0;
8748 EMIT_NEW_ARGLOAD (cfg, ins, n);
8756 CHECK_STACK_OVF (1);
8757 n = (*ip)-CEE_LDLOC_0;
8759 EMIT_NEW_LOCLOAD (cfg, ins, n);
8768 n = (*ip)-CEE_STLOC_0;
8771 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8773 emit_stloc_ir (cfg, sp, header, n);
8780 CHECK_STACK_OVF (1);
8783 EMIT_NEW_ARGLOAD (cfg, ins, n);
8789 CHECK_STACK_OVF (1);
8792 NEW_ARGLOADA (cfg, ins, n);
8793 MONO_ADD_INS (cfg->cbb, ins);
8803 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8805 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8810 CHECK_STACK_OVF (1);
8813 EMIT_NEW_LOCLOAD (cfg, ins, n);
8817 case CEE_LDLOCA_S: {
8818 unsigned char *tmp_ip;
8820 CHECK_STACK_OVF (1);
8821 CHECK_LOCAL (ip [1]);
8823 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8829 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8838 CHECK_LOCAL (ip [1]);
8839 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8841 emit_stloc_ir (cfg, sp, header, ip [1]);
8846 CHECK_STACK_OVF (1);
8847 EMIT_NEW_PCONST (cfg, ins, NULL);
8848 ins->type = STACK_OBJ;
8853 CHECK_STACK_OVF (1);
8854 EMIT_NEW_ICONST (cfg, ins, -1);
8867 CHECK_STACK_OVF (1);
8868 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8874 CHECK_STACK_OVF (1);
8876 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8882 CHECK_STACK_OVF (1);
8883 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8889 CHECK_STACK_OVF (1);
8890 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8891 ins->type = STACK_I8;
8892 ins->dreg = alloc_dreg (cfg, STACK_I8);
8894 ins->inst_l = (gint64)read64 (ip);
8895 MONO_ADD_INS (cfg->cbb, ins);
8901 gboolean use_aotconst = FALSE;
8903 #ifdef TARGET_POWERPC
8904 /* FIXME: Clean this up */
8905 if (cfg->compile_aot)
8906 use_aotconst = TRUE;
8909 /* FIXME: we should really allocate this only late in the compilation process */
8910 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8912 CHECK_STACK_OVF (1);
8918 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8920 dreg = alloc_freg (cfg);
8921 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8922 ins->type = cfg->r4_stack_type;
8924 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8925 ins->type = cfg->r4_stack_type;
8926 ins->dreg = alloc_dreg (cfg, STACK_R8);
8928 MONO_ADD_INS (cfg->cbb, ins);
8938 gboolean use_aotconst = FALSE;
8940 #ifdef TARGET_POWERPC
8941 /* FIXME: Clean this up */
8942 if (cfg->compile_aot)
8943 use_aotconst = TRUE;
8946 /* FIXME: we should really allocate this only late in the compilation process */
8947 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8949 CHECK_STACK_OVF (1);
8955 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8957 dreg = alloc_freg (cfg);
8958 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8959 ins->type = STACK_R8;
8961 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8962 ins->type = STACK_R8;
8963 ins->dreg = alloc_dreg (cfg, STACK_R8);
8965 MONO_ADD_INS (cfg->cbb, ins);
8974 MonoInst *temp, *store;
8976 CHECK_STACK_OVF (1);
8980 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8981 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8983 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8986 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8999 if (sp [0]->type == STACK_R8)
9000 /* we need to pop the value from the x86 FP stack */
9001 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
9006 MonoMethodSignature *fsig;
9009 INLINE_FAILURE ("jmp");
9010 GSHAREDVT_FAILURE (*ip);
9013 if (stack_start != sp)
9015 token = read32 (ip + 1);
9016 /* FIXME: check the signature matches */
9017 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9020 if (cfg->gshared && mono_method_check_context_used (cmethod))
9021 GENERIC_SHARING_FAILURE (CEE_JMP);
9023 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9025 fsig = mono_method_signature (cmethod);
9026 n = fsig->param_count + fsig->hasthis;
9027 if (cfg->llvm_only) {
9030 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9031 for (i = 0; i < n; ++i)
9032 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9033 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9035 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9036 * have to emit a normal return since llvm expects it.
9039 emit_setret (cfg, ins);
9040 MONO_INST_NEW (cfg, ins, OP_BR);
9041 ins->inst_target_bb = end_bblock;
9042 MONO_ADD_INS (cfg->cbb, ins);
9043 link_bblock (cfg, cfg->cbb, end_bblock);
9046 } else if (cfg->backend->have_op_tail_call) {
9047 /* Handle tail calls similarly to calls */
9050 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9051 call->method = cmethod;
9052 call->tail_call = TRUE;
9053 call->signature = mono_method_signature (cmethod);
9054 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9055 call->inst.inst_p0 = cmethod;
9056 for (i = 0; i < n; ++i)
9057 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9059 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
9060 call->vret_var = cfg->vret_addr;
9062 mono_arch_emit_call (cfg, call);
9063 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9064 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9066 for (i = 0; i < num_args; ++i)
9067 /* Prevent arguments from being optimized away */
9068 arg_array [i]->flags |= MONO_INST_VOLATILE;
9070 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9071 ins = (MonoInst*)call;
9072 ins->inst_p0 = cmethod;
9073 MONO_ADD_INS (cfg->cbb, ins);
9077 start_new_bblock = 1;
9082 MonoMethodSignature *fsig;
9085 token = read32 (ip + 1);
9089 //GSHAREDVT_FAILURE (*ip);
9094 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9097 if (method->dynamic && fsig->pinvoke) {
9101 * This is a call through a function pointer using a pinvoke
9102 * signature. Have to create a wrapper and call that instead.
9103 * FIXME: This is very slow, need to create a wrapper at JIT time
9104 * instead based on the signature.
9106 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9107 EMIT_NEW_PCONST (cfg, args [1], fsig);
9109 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9112 n = fsig->param_count + fsig->hasthis;
9116 //g_assert (!virtual_ || fsig->hasthis);
9120 inline_costs += 10 * num_calls++;
9123 * Making generic calls out of gsharedvt methods.
9124 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9125 * patching gshared method addresses into a gsharedvt method.
9127 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9129 * We pass the address to the gsharedvt trampoline in the rgctx reg
9131 MonoInst *callee = addr;
9133 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9135 GSHAREDVT_FAILURE (*ip);
9139 GSHAREDVT_FAILURE (*ip);
9141 addr = emit_get_rgctx_sig (cfg, context_used,
9142 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9143 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9147 /* Prevent inlining of methods with indirect calls */
9148 INLINE_FAILURE ("indirect call");
9150 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9151 MonoJumpInfoType info_type;
9155 * Instead of emitting an indirect call, emit a direct call
9156 * with the contents of the aotconst as the patch info.
9158 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9159 info_type = (MonoJumpInfoType)addr->inst_c1;
9160 info_data = addr->inst_p0;
9162 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9163 info_data = addr->inst_right->inst_left;
9166 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9167 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9170 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9171 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9176 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9180 /* End of call, INS should contain the result of the call, if any */
9182 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9184 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9187 CHECK_CFG_EXCEPTION;
9191 constrained_class = NULL;
9195 case CEE_CALLVIRT: {
9196 MonoInst *addr = NULL;
9197 MonoMethodSignature *fsig = NULL;
9199 int virtual_ = *ip == CEE_CALLVIRT;
9200 gboolean pass_imt_from_rgctx = FALSE;
9201 MonoInst *imt_arg = NULL;
9202 MonoInst *keep_this_alive = NULL;
9203 gboolean pass_vtable = FALSE;
9204 gboolean pass_mrgctx = FALSE;
9205 MonoInst *vtable_arg = NULL;
9206 gboolean check_this = FALSE;
9207 gboolean supported_tail_call = FALSE;
9208 gboolean tail_call = FALSE;
9209 gboolean need_seq_point = FALSE;
9210 guint32 call_opcode = *ip;
9211 gboolean emit_widen = TRUE;
9212 gboolean push_res = TRUE;
9213 gboolean skip_ret = FALSE;
9214 gboolean delegate_invoke = FALSE;
9215 gboolean direct_icall = FALSE;
9216 gboolean constrained_partial_call = FALSE;
9217 MonoMethod *cil_method;
9220 token = read32 (ip + 1);
9224 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9227 cil_method = cmethod;
9229 if (constrained_class) {
9230 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9231 if (!mini_is_gsharedvt_klass (constrained_class)) {
9232 g_assert (!cmethod->klass->valuetype);
9233 if (!mini_type_is_reference (&constrained_class->byval_arg))
9234 constrained_partial_call = TRUE;
9238 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9239 if (cfg->verbose_level > 2)
9240 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9241 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9242 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9244 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9248 if (cfg->verbose_level > 2)
9249 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9251 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9253 * This is needed since get_method_constrained can't find
9254 * the method in klass representing a type var.
9255 * The type var is guaranteed to be a reference type in this
9258 if (!mini_is_gsharedvt_klass (constrained_class))
9259 g_assert (!cmethod->klass->valuetype);
9261 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9267 if (!dont_verify && !cfg->skip_visibility) {
9268 MonoMethod *target_method = cil_method;
9269 if (method->is_inflated) {
9270 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9273 if (!mono_method_can_access_method (method_definition, target_method) &&
9274 !mono_method_can_access_method (method, cil_method))
9275 emit_method_access_failure (cfg, method, cil_method);
9278 if (mono_security_core_clr_enabled ())
9279 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9281 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9282 /* MS.NET seems to silently convert this to a callvirt */
9287 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9288 * converts to a callvirt.
9290 * tests/bug-515884.il is an example of this behavior
9292 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9293 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9294 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9298 if (!cmethod->klass->inited)
9299 if (!mono_class_init (cmethod->klass))
9300 TYPE_LOAD_ERROR (cmethod->klass);
9302 fsig = mono_method_signature (cmethod);
9305 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9306 mini_class_is_system_array (cmethod->klass)) {
9307 array_rank = cmethod->klass->rank;
9308 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9309 direct_icall = TRUE;
9310 } else if (fsig->pinvoke) {
9311 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9312 fsig = mono_method_signature (wrapper);
9313 } else if (constrained_class) {
9315 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9319 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9320 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9322 /* See code below */
9323 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9324 MonoBasicBlock *tbb;
9326 GET_BBLOCK (cfg, tbb, ip + 5);
9327 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9329 * We want to extend the try block to cover the call, but we can't do it if the
9330 * call is made directly since it's followed by an exception check.
9332 direct_icall = FALSE;
9336 mono_save_token_info (cfg, image, token, cil_method);
9338 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9339 need_seq_point = TRUE;
9341 /* Don't support calls made using type arguments for now */
9343 if (cfg->gsharedvt) {
9344 if (mini_is_gsharedvt_signature (fsig))
9345 GSHAREDVT_FAILURE (*ip);
9349 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9350 g_assert_not_reached ();
9352 n = fsig->param_count + fsig->hasthis;
9354 if (!cfg->gshared && cmethod->klass->generic_container)
9358 g_assert (!mono_method_check_context_used (cmethod));
9362 //g_assert (!virtual_ || fsig->hasthis);
9367 * We have the `constrained.' prefix opcode.
9369 if (constrained_class) {
9370 if (mini_is_gsharedvt_klass (constrained_class)) {
9371 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9372 /* The 'Own method' case below */
9373 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9374 /* 'The type parameter is instantiated as a reference type' case below. */
9376 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9377 CHECK_CFG_EXCEPTION;
9383 if (constrained_partial_call) {
9384 gboolean need_box = TRUE;
9387 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9388 * called method is not known at compile time either. The called method could end up being
9389 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9390 * to box the receiver.
9391 * A simple solution would be to box always and make a normal virtual call, but that would
9392 * be bad performance wise.
9394 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9396 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9401 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9402 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9403 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9404 ins->klass = constrained_class;
9405 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9406 CHECK_CFG_EXCEPTION;
9407 } else if (need_box) {
9409 MonoBasicBlock *is_ref_bb, *end_bb;
9410 MonoInst *nonbox_call;
9413 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9415 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9416 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9418 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9420 NEW_BBLOCK (cfg, is_ref_bb);
9421 NEW_BBLOCK (cfg, end_bb);
9423 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9428 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9430 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9433 MONO_START_BB (cfg, is_ref_bb);
9434 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9435 ins->klass = constrained_class;
9436 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9437 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9439 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9441 MONO_START_BB (cfg, end_bb);
9444 nonbox_call->dreg = ins->dreg;
9447 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9448 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9449 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9452 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9454 * The type parameter is instantiated as a valuetype,
9455 * but that type doesn't override the method we're
9456 * calling, so we need to box `this'.
9458 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9459 ins->klass = constrained_class;
9460 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9461 CHECK_CFG_EXCEPTION;
9462 } else if (!constrained_class->valuetype) {
9463 int dreg = alloc_ireg_ref (cfg);
9466 * The type parameter is instantiated as a reference
9467 * type. We have a managed pointer on the stack, so
9468 * we need to dereference it here.
9470 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9471 ins->type = STACK_OBJ;
9474 if (cmethod->klass->valuetype) {
9477 /* Interface method */
9480 mono_class_setup_vtable (constrained_class);
9481 CHECK_TYPELOAD (constrained_class);
9482 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9484 TYPE_LOAD_ERROR (constrained_class);
9485 slot = mono_method_get_vtable_slot (cmethod);
9487 TYPE_LOAD_ERROR (cmethod->klass);
9488 cmethod = constrained_class->vtable [ioffset + slot];
9490 if (cmethod->klass == mono_defaults.enum_class) {
9491 /* Enum implements some interfaces, so treat this as the first case */
9492 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9493 ins->klass = constrained_class;
9494 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9495 CHECK_CFG_EXCEPTION;
9500 constrained_class = NULL;
9503 if (check_call_signature (cfg, fsig, sp))
9506 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9507 delegate_invoke = TRUE;
9509 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9510 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9511 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9519 * If the callee is a shared method, then its static cctor
9520 * might not get called after the call was patched.
9522 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9523 emit_class_init (cfg, cmethod->klass);
9524 CHECK_TYPELOAD (cmethod->klass);
9527 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9530 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9532 context_used = mini_method_check_context_used (cfg, cmethod);
9534 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9535 /* Generic method interface
9536 calls are resolved via a
9537 helper function and don't
9539 if (!cmethod_context || !cmethod_context->method_inst)
9540 pass_imt_from_rgctx = TRUE;
9544 * If a shared method calls another
9545 * shared method then the caller must
9546 * have a generic sharing context
9547 * because the magic trampoline
9548 * requires it. FIXME: We shouldn't
9549 * have to force the vtable/mrgctx
9550 * variable here. Instead there
9551 * should be a flag in the cfg to
9552 * request a generic sharing context.
9555 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9556 mono_get_vtable_var (cfg);
9561 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9563 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9565 CHECK_TYPELOAD (cmethod->klass);
9566 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9571 g_assert (!vtable_arg);
9573 if (!cfg->compile_aot) {
9575 * emit_get_rgctx_method () calls mono_class_vtable () so check
9576 * for type load errors before.
9578 mono_class_setup_vtable (cmethod->klass);
9579 CHECK_TYPELOAD (cmethod->klass);
9582 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9584 /* !marshalbyref is needed to properly handle generic methods + remoting */
9585 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9586 MONO_METHOD_IS_FINAL (cmethod)) &&
9587 !mono_class_is_marshalbyref (cmethod->klass)) {
9594 if (pass_imt_from_rgctx) {
9595 g_assert (!pass_vtable);
9597 imt_arg = emit_get_rgctx_method (cfg, context_used,
9598 cmethod, MONO_RGCTX_INFO_METHOD);
9602 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9604 /* Calling virtual generic methods */
9605 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9606 !(MONO_METHOD_IS_FINAL (cmethod) &&
9607 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9608 fsig->generic_param_count &&
9609 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9611 MonoInst *this_temp, *this_arg_temp, *store;
9612 MonoInst *iargs [4];
9614 g_assert (fsig->is_inflated);
9616 /* Prevent inlining of methods that contain indirect calls */
9617 INLINE_FAILURE ("virtual generic call");
9619 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9620 GSHAREDVT_FAILURE (*ip);
9622 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9623 g_assert (!imt_arg);
9625 g_assert (cmethod->is_inflated);
9626 imt_arg = emit_get_rgctx_method (cfg, context_used,
9627 cmethod, MONO_RGCTX_INFO_METHOD);
9628 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9630 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9631 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9632 MONO_ADD_INS (cfg->cbb, store);
9634 /* FIXME: This should be a managed pointer */
9635 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9637 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9638 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9639 cmethod, MONO_RGCTX_INFO_METHOD);
9640 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9641 addr = mono_emit_jit_icall (cfg,
9642 mono_helper_compile_generic_method, iargs);
9644 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9646 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9653 * Implement a workaround for the inherent races involved in locking:
9659 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9660 * try block, the Exit () won't be executed, see:
9661 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9662 * To work around this, we extend such try blocks to include the last x bytes
9663 * of the Monitor.Enter () call.
9665 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9666 MonoBasicBlock *tbb;
9668 GET_BBLOCK (cfg, tbb, ip + 5);
9670 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9671 * from Monitor.Enter like ArgumentNullException.
9673 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9674 /* Mark this bblock as needing to be extended */
9675 tbb->extend_try_block = TRUE;
9679 /* Conversion to a JIT intrinsic */
9680 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9681 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9682 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9690 if ((cfg->opt & MONO_OPT_INLINE) &&
9691 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9692 mono_method_check_inlining (cfg, cmethod)) {
9694 gboolean always = FALSE;
9696 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9697 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9698 /* Prevent inlining of methods that call wrappers */
9699 INLINE_FAILURE ("wrapper call");
9700 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9704 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9706 cfg->real_offset += 5;
9708 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9709 /* *sp is already set by inline_method */
9714 inline_costs += costs;
9720 /* Tail recursion elimination */
9721 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9722 gboolean has_vtargs = FALSE;
9725 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9726 INLINE_FAILURE ("tail call");
9728 /* keep it simple */
9729 for (i = fsig->param_count - 1; i >= 0; i--) {
9730 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9735 if (need_seq_point) {
9736 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9737 need_seq_point = FALSE;
9739 for (i = 0; i < n; ++i)
9740 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9741 MONO_INST_NEW (cfg, ins, OP_BR);
9742 MONO_ADD_INS (cfg->cbb, ins);
9743 tblock = start_bblock->out_bb [0];
9744 link_bblock (cfg, cfg->cbb, tblock);
9745 ins->inst_target_bb = tblock;
9746 start_new_bblock = 1;
9748 /* skip the CEE_RET, too */
9749 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9756 inline_costs += 10 * num_calls++;
9759 * Synchronized wrappers.
9760 * It's hard to determine where to replace a method with its synchronized
9761 * wrapper without causing an infinite recursion. The current solution is
9762 * to add the synchronized wrapper in the trampolines, and to
9763 * change the called method to a dummy wrapper, and resolve that wrapper
9764 * to the real method in mono_jit_compile_method ().
9766 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9767 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9768 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9769 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9773 * Making generic calls out of gsharedvt methods.
9774 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9775 * patching gshared method addresses into a gsharedvt method.
9777 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9778 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9779 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9780 MonoRgctxInfoType info_type;
9783 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9784 //GSHAREDVT_FAILURE (*ip);
9785 // disable for possible remoting calls
9786 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9787 GSHAREDVT_FAILURE (*ip);
9788 if (fsig->generic_param_count) {
9789 /* virtual generic call */
9790 g_assert (!imt_arg);
9791 /* Same as the virtual generic case above */
9792 imt_arg = emit_get_rgctx_method (cfg, context_used,
9793 cmethod, MONO_RGCTX_INFO_METHOD);
9794 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9796 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9797 /* This can happen when we call a fully instantiated iface method */
9798 imt_arg = emit_get_rgctx_method (cfg, context_used,
9799 cmethod, MONO_RGCTX_INFO_METHOD);
9804 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9805 keep_this_alive = sp [0];
9807 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9808 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9810 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9811 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9813 if (cfg->llvm_only) {
9814 // FIXME: Avoid initializing vtable_arg
9815 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9817 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9822 /* Generic sharing */
9825 * Use this if the callee is gsharedvt sharable too, since
9826 * at runtime we might find an instantiation so the call cannot
9827 * be patched (the 'no_patch' code path in mini-trampolines.c).
9829 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9830 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9831 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9832 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9833 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9834 INLINE_FAILURE ("gshared");
9836 g_assert (cfg->gshared && cmethod);
9840 * We are compiling a call to a
9841 * generic method from shared code,
9842 * which means that we have to look up
9843 * the method in the rgctx and do an
9847 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9849 if (cfg->llvm_only) {
9850 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9851 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9853 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9854 // FIXME: Avoid initializing imt_arg/vtable_arg
9855 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9857 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9858 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9863 /* Direct calls to icalls */
9865 MonoMethod *wrapper;
9868 /* Inline the wrapper */
9869 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9871 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9872 g_assert (costs > 0);
9873 cfg->real_offset += 5;
9875 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9876 /* *sp is already set by inline_method */
9881 inline_costs += costs;
9890 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9891 MonoInst *val = sp [fsig->param_count];
9893 if (val->type == STACK_OBJ) {
9894 MonoInst *iargs [2];
9899 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9902 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9903 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9904 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9905 emit_write_barrier (cfg, addr, val);
9906 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9907 GSHAREDVT_FAILURE (*ip);
9908 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9909 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9911 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9912 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9913 if (!cmethod->klass->element_class->valuetype && !readonly)
9914 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9915 CHECK_TYPELOAD (cmethod->klass);
9918 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9921 g_assert_not_reached ();
9928 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9932 /* Tail prefix / tail call optimization */
9934 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9935 /* FIXME: runtime generic context pointer for jumps? */
9936 /* FIXME: handle this for generic sharing eventually */
9937 if ((ins_flag & MONO_INST_TAILCALL) &&
9938 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9939 supported_tail_call = TRUE;
9941 if (supported_tail_call) {
9944 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9945 INLINE_FAILURE ("tail call");
9947 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9949 if (cfg->backend->have_op_tail_call) {
9950 /* Handle tail calls similarly to normal calls */
9953 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9955 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9956 call->tail_call = TRUE;
9957 call->method = cmethod;
9958 call->signature = mono_method_signature (cmethod);
9961 * We implement tail calls by storing the actual arguments into the
9962 * argument variables, then emitting a CEE_JMP.
9964 for (i = 0; i < n; ++i) {
9965 /* Prevent argument from being register allocated */
9966 arg_array [i]->flags |= MONO_INST_VOLATILE;
9967 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9969 ins = (MonoInst*)call;
9970 ins->inst_p0 = cmethod;
9971 ins->inst_p1 = arg_array [0];
9972 MONO_ADD_INS (cfg->cbb, ins);
9973 link_bblock (cfg, cfg->cbb, end_bblock);
9974 start_new_bblock = 1;
9976 // FIXME: Eliminate unreachable epilogs
9979 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9980 * only reachable from this call.
9982 GET_BBLOCK (cfg, tblock, ip + 5);
9983 if (tblock == cfg->cbb || tblock->in_count == 0)
9992 * Virtual calls in llvm-only mode.
9994 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9995 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
10000 INLINE_FAILURE ("call");
10001 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
10002 imt_arg, vtable_arg);
10004 if (tail_call && !cfg->llvm_only) {
10005 link_bblock (cfg, cfg->cbb, end_bblock);
10006 start_new_bblock = 1;
10008 // FIXME: Eliminate unreachable epilogs
10011 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10012 * only reachable from this call.
10014 GET_BBLOCK (cfg, tblock, ip + 5);
10015 if (tblock == cfg->cbb || tblock->in_count == 0)
10022 /* End of call, INS should contain the result of the call, if any */
10024 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10027 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10032 if (keep_this_alive) {
10033 MonoInst *dummy_use;
10035 /* See mono_emit_method_call_full () */
10036 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10039 CHECK_CFG_EXCEPTION;
10043 g_assert (*ip == CEE_RET);
10047 constrained_class = NULL;
10048 if (need_seq_point)
10049 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10053 if (cfg->method != method) {
10054 /* return from inlined method */
10056 * If in_count == 0, that means the ret is unreachable due to
10057 * being preceeded by a throw. In that case, inline_method () will
10058 * handle setting the return value
10059 * (test case: test_0_inline_throw ()).
10061 if (return_var && cfg->cbb->in_count) {
10062 MonoType *ret_type = mono_method_signature (method)->ret;
10068 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10071 //g_assert (returnvar != -1);
10072 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10073 cfg->ret_var_set = TRUE;
10076 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10078 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10079 emit_pop_lmf (cfg);
10082 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10084 if (seq_points && !sym_seq_points) {
10086 * Place a seq point here too even through the IL stack is not
10087 * empty, so a step over on
10090 * will work correctly.
10092 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10093 MONO_ADD_INS (cfg->cbb, ins);
10096 g_assert (!return_var);
10100 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10103 emit_setret (cfg, *sp);
10106 if (sp != stack_start)
10108 MONO_INST_NEW (cfg, ins, OP_BR);
10110 ins->inst_target_bb = end_bblock;
10111 MONO_ADD_INS (cfg->cbb, ins);
10112 link_bblock (cfg, cfg->cbb, end_bblock);
10113 start_new_bblock = 1;
10117 MONO_INST_NEW (cfg, ins, OP_BR);
10119 target = ip + 1 + (signed char)(*ip);
10121 GET_BBLOCK (cfg, tblock, target);
10122 link_bblock (cfg, cfg->cbb, tblock);
10123 ins->inst_target_bb = tblock;
10124 if (sp != stack_start) {
10125 handle_stack_args (cfg, stack_start, sp - stack_start);
10127 CHECK_UNVERIFIABLE (cfg);
10129 MONO_ADD_INS (cfg->cbb, ins);
10130 start_new_bblock = 1;
10131 inline_costs += BRANCH_COST;
10145 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10147 target = ip + 1 + *(signed char*)ip;
10150 ADD_BINCOND (NULL);
10153 inline_costs += BRANCH_COST;
10157 MONO_INST_NEW (cfg, ins, OP_BR);
10160 target = ip + 4 + (gint32)read32(ip);
10162 GET_BBLOCK (cfg, tblock, target);
10163 link_bblock (cfg, cfg->cbb, tblock);
10164 ins->inst_target_bb = tblock;
10165 if (sp != stack_start) {
10166 handle_stack_args (cfg, stack_start, sp - stack_start);
10168 CHECK_UNVERIFIABLE (cfg);
10171 MONO_ADD_INS (cfg->cbb, ins);
10173 start_new_bblock = 1;
10174 inline_costs += BRANCH_COST;
10176 case CEE_BRFALSE_S:
10181 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10182 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10183 guint32 opsize = is_short ? 1 : 4;
10185 CHECK_OPSIZE (opsize);
10187 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10190 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10195 GET_BBLOCK (cfg, tblock, target);
10196 link_bblock (cfg, cfg->cbb, tblock);
10197 GET_BBLOCK (cfg, tblock, ip);
10198 link_bblock (cfg, cfg->cbb, tblock);
10200 if (sp != stack_start) {
10201 handle_stack_args (cfg, stack_start, sp - stack_start);
10202 CHECK_UNVERIFIABLE (cfg);
10205 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10206 cmp->sreg1 = sp [0]->dreg;
10207 type_from_op (cfg, cmp, sp [0], NULL);
10210 #if SIZEOF_REGISTER == 4
10211 if (cmp->opcode == OP_LCOMPARE_IMM) {
10212 /* Convert it to OP_LCOMPARE */
10213 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10214 ins->type = STACK_I8;
10215 ins->dreg = alloc_dreg (cfg, STACK_I8);
10217 MONO_ADD_INS (cfg->cbb, ins);
10218 cmp->opcode = OP_LCOMPARE;
10219 cmp->sreg2 = ins->dreg;
10222 MONO_ADD_INS (cfg->cbb, cmp);
10224 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10225 type_from_op (cfg, ins, sp [0], NULL);
10226 MONO_ADD_INS (cfg->cbb, ins);
10227 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10228 GET_BBLOCK (cfg, tblock, target);
10229 ins->inst_true_bb = tblock;
10230 GET_BBLOCK (cfg, tblock, ip);
10231 ins->inst_false_bb = tblock;
10232 start_new_bblock = 2;
10235 inline_costs += BRANCH_COST;
10250 MONO_INST_NEW (cfg, ins, *ip);
10252 target = ip + 4 + (gint32)read32(ip);
10255 ADD_BINCOND (NULL);
10258 inline_costs += BRANCH_COST;
10262 MonoBasicBlock **targets;
10263 MonoBasicBlock *default_bblock;
10264 MonoJumpInfoBBTable *table;
10265 int offset_reg = alloc_preg (cfg);
10266 int target_reg = alloc_preg (cfg);
10267 int table_reg = alloc_preg (cfg);
10268 int sum_reg = alloc_preg (cfg);
10269 gboolean use_op_switch;
10273 n = read32 (ip + 1);
10276 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10280 CHECK_OPSIZE (n * sizeof (guint32));
10281 target = ip + n * sizeof (guint32);
10283 GET_BBLOCK (cfg, default_bblock, target);
10284 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10286 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10287 for (i = 0; i < n; ++i) {
10288 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10289 targets [i] = tblock;
10290 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10294 if (sp != stack_start) {
10296 * Link the current bb with the targets as well, so handle_stack_args
10297 * will set their in_stack correctly.
10299 link_bblock (cfg, cfg->cbb, default_bblock);
10300 for (i = 0; i < n; ++i)
10301 link_bblock (cfg, cfg->cbb, targets [i]);
10303 handle_stack_args (cfg, stack_start, sp - stack_start);
10305 CHECK_UNVERIFIABLE (cfg);
10307 /* Undo the links */
10308 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10309 for (i = 0; i < n; ++i)
10310 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10313 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10314 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10316 for (i = 0; i < n; ++i)
10317 link_bblock (cfg, cfg->cbb, targets [i]);
10319 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10320 table->table = targets;
10321 table->table_size = n;
10323 use_op_switch = FALSE;
10325 /* ARM implements SWITCH statements differently */
10326 /* FIXME: Make it use the generic implementation */
10327 if (!cfg->compile_aot)
10328 use_op_switch = TRUE;
10331 if (COMPILE_LLVM (cfg))
10332 use_op_switch = TRUE;
10334 cfg->cbb->has_jump_table = 1;
10336 if (use_op_switch) {
10337 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10338 ins->sreg1 = src1->dreg;
10339 ins->inst_p0 = table;
10340 ins->inst_many_bb = targets;
10341 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10342 MONO_ADD_INS (cfg->cbb, ins);
10344 if (sizeof (gpointer) == 8)
10345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10349 #if SIZEOF_REGISTER == 8
10350 /* The upper word might not be zero, and we add it to a 64 bit address later */
10351 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10354 if (cfg->compile_aot) {
10355 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10357 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10358 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10359 ins->inst_p0 = table;
10360 ins->dreg = table_reg;
10361 MONO_ADD_INS (cfg->cbb, ins);
10364 /* FIXME: Use load_memindex */
10365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10367 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10369 start_new_bblock = 1;
10370 inline_costs += (BRANCH_COST * 2);
10383 case CEE_LDIND_REF:
10390 dreg = alloc_freg (cfg);
10393 dreg = alloc_lreg (cfg);
10395 case CEE_LDIND_REF:
10396 dreg = alloc_ireg_ref (cfg);
10399 dreg = alloc_preg (cfg);
10402 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10403 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10404 if (*ip == CEE_LDIND_R4)
10405 ins->type = cfg->r4_stack_type;
10406 ins->flags |= ins_flag;
10407 MONO_ADD_INS (cfg->cbb, ins);
10409 if (ins_flag & MONO_INST_VOLATILE) {
10410 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10411 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10416 case CEE_STIND_REF:
10427 if (ins_flag & MONO_INST_VOLATILE) {
10428 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10429 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10432 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10433 ins->flags |= ins_flag;
10436 MONO_ADD_INS (cfg->cbb, ins);
10438 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10439 emit_write_barrier (cfg, sp [0], sp [1]);
10448 MONO_INST_NEW (cfg, ins, (*ip));
10450 ins->sreg1 = sp [0]->dreg;
10451 ins->sreg2 = sp [1]->dreg;
10452 type_from_op (cfg, ins, sp [0], sp [1]);
10454 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10456 /* Use the immediate opcodes if possible */
10457 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10458 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10459 if (imm_opcode != -1) {
10460 ins->opcode = imm_opcode;
10461 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10464 NULLIFY_INS (sp [1]);
10468 MONO_ADD_INS ((cfg)->cbb, (ins));
10470 *sp++ = mono_decompose_opcode (cfg, ins);
10487 MONO_INST_NEW (cfg, ins, (*ip));
10489 ins->sreg1 = sp [0]->dreg;
10490 ins->sreg2 = sp [1]->dreg;
10491 type_from_op (cfg, ins, sp [0], sp [1]);
10493 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10494 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10496 /* FIXME: Pass opcode to is_inst_imm */
10498 /* Use the immediate opcodes if possible */
10499 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10500 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10501 if (imm_opcode != -1) {
10502 ins->opcode = imm_opcode;
10503 if (sp [1]->opcode == OP_I8CONST) {
10504 #if SIZEOF_REGISTER == 8
10505 ins->inst_imm = sp [1]->inst_l;
10507 ins->inst_ls_word = sp [1]->inst_ls_word;
10508 ins->inst_ms_word = sp [1]->inst_ms_word;
10512 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10515 /* Might be followed by an instruction added by add_widen_op */
10516 if (sp [1]->next == NULL)
10517 NULLIFY_INS (sp [1]);
10520 MONO_ADD_INS ((cfg)->cbb, (ins));
10522 *sp++ = mono_decompose_opcode (cfg, ins);
10535 case CEE_CONV_OVF_I8:
10536 case CEE_CONV_OVF_U8:
10537 case CEE_CONV_R_UN:
10540 /* Special case this earlier so we have long constants in the IR */
10541 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10542 int data = sp [-1]->inst_c0;
10543 sp [-1]->opcode = OP_I8CONST;
10544 sp [-1]->type = STACK_I8;
10545 #if SIZEOF_REGISTER == 8
10546 if ((*ip) == CEE_CONV_U8)
10547 sp [-1]->inst_c0 = (guint32)data;
10549 sp [-1]->inst_c0 = data;
10551 sp [-1]->inst_ls_word = data;
10552 if ((*ip) == CEE_CONV_U8)
10553 sp [-1]->inst_ms_word = 0;
10555 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10557 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10564 case CEE_CONV_OVF_I4:
10565 case CEE_CONV_OVF_I1:
10566 case CEE_CONV_OVF_I2:
10567 case CEE_CONV_OVF_I:
10568 case CEE_CONV_OVF_U:
10571 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10572 ADD_UNOP (CEE_CONV_OVF_I8);
10579 case CEE_CONV_OVF_U1:
10580 case CEE_CONV_OVF_U2:
10581 case CEE_CONV_OVF_U4:
10584 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10585 ADD_UNOP (CEE_CONV_OVF_U8);
10592 case CEE_CONV_OVF_I1_UN:
10593 case CEE_CONV_OVF_I2_UN:
10594 case CEE_CONV_OVF_I4_UN:
10595 case CEE_CONV_OVF_I8_UN:
10596 case CEE_CONV_OVF_U1_UN:
10597 case CEE_CONV_OVF_U2_UN:
10598 case CEE_CONV_OVF_U4_UN:
10599 case CEE_CONV_OVF_U8_UN:
10600 case CEE_CONV_OVF_I_UN:
10601 case CEE_CONV_OVF_U_UN:
10608 CHECK_CFG_EXCEPTION;
10612 case CEE_ADD_OVF_UN:
10614 case CEE_MUL_OVF_UN:
10616 case CEE_SUB_OVF_UN:
10622 GSHAREDVT_FAILURE (*ip);
10625 token = read32 (ip + 1);
10626 klass = mini_get_class (method, token, generic_context);
10627 CHECK_TYPELOAD (klass);
10629 if (generic_class_is_reference_type (cfg, klass)) {
10630 MonoInst *store, *load;
10631 int dreg = alloc_ireg_ref (cfg);
10633 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10634 load->flags |= ins_flag;
10635 MONO_ADD_INS (cfg->cbb, load);
10637 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10638 store->flags |= ins_flag;
10639 MONO_ADD_INS (cfg->cbb, store);
10641 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10642 emit_write_barrier (cfg, sp [0], sp [1]);
10644 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10650 int loc_index = -1;
10656 token = read32 (ip + 1);
10657 klass = mini_get_class (method, token, generic_context);
10658 CHECK_TYPELOAD (klass);
10660 /* Optimize the common ldobj+stloc combination */
10663 loc_index = ip [6];
10670 loc_index = ip [5] - CEE_STLOC_0;
10677 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10678 CHECK_LOCAL (loc_index);
10680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10681 ins->dreg = cfg->locals [loc_index]->dreg;
10682 ins->flags |= ins_flag;
10685 if (ins_flag & MONO_INST_VOLATILE) {
10686 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10687 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10693 /* Optimize the ldobj+stobj combination */
10694 /* The reference case ends up being a load+store anyway */
10695 /* Skip this if the operation is volatile. */
10696 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10701 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10708 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10709 ins->flags |= ins_flag;
10712 if (ins_flag & MONO_INST_VOLATILE) {
10713 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10714 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10723 CHECK_STACK_OVF (1);
10725 n = read32 (ip + 1);
10727 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10728 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10729 ins->type = STACK_OBJ;
10732 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10733 MonoInst *iargs [1];
10734 char *str = (char *)mono_method_get_wrapper_data (method, n);
10736 if (cfg->compile_aot)
10737 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10739 EMIT_NEW_PCONST (cfg, iargs [0], str);
10740 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10742 if (cfg->opt & MONO_OPT_SHARED) {
10743 MonoInst *iargs [3];
10745 if (cfg->compile_aot) {
10746 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10748 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10749 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10750 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10751 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10752 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10755 if (cfg->cbb->out_of_line) {
10756 MonoInst *iargs [2];
10758 if (image == mono_defaults.corlib) {
10760 * Avoid relocations in AOT and save some space by using a
10761 * version of helper_ldstr specialized to mscorlib.
10763 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10764 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10766 /* Avoid creating the string object */
10767 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10768 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10769 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10773 if (cfg->compile_aot) {
10774 NEW_LDSTRCONST (cfg, ins, image, n);
10776 MONO_ADD_INS (cfg->cbb, ins);
10779 NEW_PCONST (cfg, ins, NULL);
10780 ins->type = STACK_OBJ;
10781 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10785 OUT_OF_MEMORY_FAILURE;
10788 MONO_ADD_INS (cfg->cbb, ins);
10797 MonoInst *iargs [2];
10798 MonoMethodSignature *fsig;
10801 MonoInst *vtable_arg = NULL;
10804 token = read32 (ip + 1);
10805 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10808 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10811 mono_save_token_info (cfg, image, token, cmethod);
10813 if (!mono_class_init (cmethod->klass))
10814 TYPE_LOAD_ERROR (cmethod->klass);
10816 context_used = mini_method_check_context_used (cfg, cmethod);
10818 if (mono_security_core_clr_enabled ())
10819 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10821 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10822 emit_class_init (cfg, cmethod->klass);
10823 CHECK_TYPELOAD (cmethod->klass);
10827 if (cfg->gsharedvt) {
10828 if (mini_is_gsharedvt_variable_signature (sig))
10829 GSHAREDVT_FAILURE (*ip);
10833 n = fsig->param_count;
10837 * Generate smaller code for the common newobj <exception> instruction in
10838 * argument checking code.
10840 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10841 is_exception_class (cmethod->klass) && n <= 2 &&
10842 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10843 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10844 MonoInst *iargs [3];
10848 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10851 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10854 iargs [1] = sp [0];
10855 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10858 iargs [1] = sp [0];
10859 iargs [2] = sp [1];
10860 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10863 g_assert_not_reached ();
10871 /* move the args to allow room for 'this' in the first position */
10877 /* check_call_signature () requires sp[0] to be set */
10878 this_ins.type = STACK_OBJ;
10879 sp [0] = &this_ins;
10880 if (check_call_signature (cfg, fsig, sp))
10885 if (mini_class_is_system_array (cmethod->klass)) {
10886 *sp = emit_get_rgctx_method (cfg, context_used,
10887 cmethod, MONO_RGCTX_INFO_METHOD);
10889 /* Avoid varargs in the common case */
10890 if (fsig->param_count == 1)
10891 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10892 else if (fsig->param_count == 2)
10893 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10894 else if (fsig->param_count == 3)
10895 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10896 else if (fsig->param_count == 4)
10897 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10899 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10900 } else if (cmethod->string_ctor) {
10901 g_assert (!context_used);
10902 g_assert (!vtable_arg);
10903 /* we simply pass a null pointer */
10904 EMIT_NEW_PCONST (cfg, *sp, NULL);
10905 /* now call the string ctor */
10906 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10908 if (cmethod->klass->valuetype) {
10909 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10910 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10911 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10916 * The code generated by mini_emit_virtual_call () expects
10917 * iargs [0] to be a boxed instance, but luckily the vcall
10918 * will be transformed into a normal call there.
10920 } else if (context_used) {
10921 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10924 MonoVTable *vtable = NULL;
10926 if (!cfg->compile_aot)
10927 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10928 CHECK_TYPELOAD (cmethod->klass);
10931 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10932 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10933 * As a workaround, we call class cctors before allocating objects.
10935 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10936 emit_class_init (cfg, cmethod->klass);
10937 if (cfg->verbose_level > 2)
10938 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10939 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10942 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10945 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10948 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10950 /* Now call the actual ctor */
10951 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10952 CHECK_CFG_EXCEPTION;
10955 if (alloc == NULL) {
10957 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10958 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10966 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10967 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10970 case CEE_CASTCLASS:
10975 token = read32 (ip + 1);
10976 klass = mini_get_class (method, token, generic_context);
10977 CHECK_TYPELOAD (klass);
10978 if (sp [0]->type != STACK_OBJ)
10981 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10982 ins->dreg = alloc_preg (cfg);
10983 ins->sreg1 = (*sp)->dreg;
10984 ins->klass = klass;
10985 ins->type = STACK_OBJ;
10986 MONO_ADD_INS (cfg->cbb, ins);
10988 CHECK_CFG_EXCEPTION;
10992 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10995 case CEE_UNBOX_ANY: {
10996 MonoInst *res, *addr;
11001 token = read32 (ip + 1);
11002 klass = mini_get_class (method, token, generic_context);
11003 CHECK_TYPELOAD (klass);
11005 mono_save_token_info (cfg, image, token, klass);
11007 context_used = mini_class_check_context_used (cfg, klass);
11009 if (mini_is_gsharedvt_klass (klass)) {
11010 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11012 } else if (generic_class_is_reference_type (cfg, klass)) {
11013 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
11014 res->dreg = alloc_preg (cfg);
11015 res->sreg1 = (*sp)->dreg;
11016 res->klass = klass;
11017 res->type = STACK_OBJ;
11018 MONO_ADD_INS (cfg->cbb, res);
11019 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11020 } else if (mono_class_is_nullable (klass)) {
11021 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11023 addr = handle_unbox (cfg, klass, sp, context_used);
11025 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11036 MonoClass *enum_class;
11037 MonoMethod *has_flag;
11043 token = read32 (ip + 1);
11044 klass = mini_get_class (method, token, generic_context);
11045 CHECK_TYPELOAD (klass);
11047 mono_save_token_info (cfg, image, token, klass);
11049 context_used = mini_class_check_context_used (cfg, klass);
11051 if (generic_class_is_reference_type (cfg, klass)) {
11057 if (klass == mono_defaults.void_class)
11059 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11061 /* frequent check in generic code: box (struct), brtrue */
11066 * <push int/long ptr>
11069 * constrained. MyFlags
11070 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11072 * If we find this sequence and the operand types on box and constrained
11073 * are equal, we can emit a specialized instruction sequence instead of
11074 * the very slow HasFlag () call.
11076 if ((cfg->opt & MONO_OPT_INTRINS) &&
11077 /* Cheap checks first. */
11078 ip + 5 + 6 + 5 < end &&
11079 ip [5] == CEE_PREFIX1 &&
11080 ip [6] == CEE_CONSTRAINED_ &&
11081 ip [11] == CEE_CALLVIRT &&
11082 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11083 mono_class_is_enum (klass) &&
11084 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11085 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11086 has_flag->klass == mono_defaults.enum_class &&
11087 !strcmp (has_flag->name, "HasFlag") &&
11088 has_flag->signature->hasthis &&
11089 has_flag->signature->param_count == 1) {
11090 CHECK_TYPELOAD (enum_class);
11092 if (enum_class == klass) {
11093 MonoInst *enum_this, *enum_flag;
11098 enum_this = sp [0];
11099 enum_flag = sp [1];
11101 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11106 // FIXME: LLVM can't handle the inconsistent bb linking
11107 if (!mono_class_is_nullable (klass) &&
11108 !mini_is_gsharedvt_klass (klass) &&
11109 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11110 (ip [5] == CEE_BRTRUE ||
11111 ip [5] == CEE_BRTRUE_S ||
11112 ip [5] == CEE_BRFALSE ||
11113 ip [5] == CEE_BRFALSE_S)) {
11114 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11116 MonoBasicBlock *true_bb, *false_bb;
11120 if (cfg->verbose_level > 3) {
11121 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11122 printf ("<box+brtrue opt>\n");
11127 case CEE_BRFALSE_S:
11130 target = ip + 1 + (signed char)(*ip);
11137 target = ip + 4 + (gint)(read32 (ip));
11141 g_assert_not_reached ();
11145 * We need to link both bblocks, since it is needed for handling stack
11146 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11147 * Branching to only one of them would lead to inconsistencies, so
11148 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11150 GET_BBLOCK (cfg, true_bb, target);
11151 GET_BBLOCK (cfg, false_bb, ip);
11153 mono_link_bblock (cfg, cfg->cbb, true_bb);
11154 mono_link_bblock (cfg, cfg->cbb, false_bb);
11156 if (sp != stack_start) {
11157 handle_stack_args (cfg, stack_start, sp - stack_start);
11159 CHECK_UNVERIFIABLE (cfg);
11162 if (COMPILE_LLVM (cfg)) {
11163 dreg = alloc_ireg (cfg);
11164 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11167 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11169 /* The JIT can't eliminate the iconst+compare */
11170 MONO_INST_NEW (cfg, ins, OP_BR);
11171 ins->inst_target_bb = is_true ? true_bb : false_bb;
11172 MONO_ADD_INS (cfg->cbb, ins);
11175 start_new_bblock = 1;
11179 *sp++ = handle_box (cfg, val, klass, context_used);
11181 CHECK_CFG_EXCEPTION;
11190 token = read32 (ip + 1);
11191 klass = mini_get_class (method, token, generic_context);
11192 CHECK_TYPELOAD (klass);
11194 mono_save_token_info (cfg, image, token, klass);
11196 context_used = mini_class_check_context_used (cfg, klass);
11198 if (mono_class_is_nullable (klass)) {
11201 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11202 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11206 ins = handle_unbox (cfg, klass, sp, context_used);
11219 MonoClassField *field;
11220 #ifndef DISABLE_REMOTING
11224 gboolean is_instance;
11226 gpointer addr = NULL;
11227 gboolean is_special_static;
11229 MonoInst *store_val = NULL;
11230 MonoInst *thread_ins;
11233 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11235 if (op == CEE_STFLD) {
11238 store_val = sp [1];
11243 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11245 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11248 if (op == CEE_STSFLD) {
11251 store_val = sp [0];
11256 token = read32 (ip + 1);
11257 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11258 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11259 klass = field->parent;
11262 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11265 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11266 FIELD_ACCESS_FAILURE (method, field);
11267 mono_class_init (klass);
11269 /* if the class is Critical then transparent code cannot access its fields */
11270 if (!is_instance && mono_security_core_clr_enabled ())
11271 ensure_method_is_allowed_to_access_field (cfg, method, field);
11273 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11274 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11275 if (mono_security_core_clr_enabled ())
11276 ensure_method_is_allowed_to_access_field (cfg, method, field);
11279 ftype = mono_field_get_type (field);
11282 * LDFLD etc. is usable on static fields as well, so convert those cases to
11285 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11297 g_assert_not_reached ();
11299 is_instance = FALSE;
11302 context_used = mini_class_check_context_used (cfg, klass);
11304 /* INSTANCE CASE */
11306 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11307 if (op == CEE_STFLD) {
11308 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11310 #ifndef DISABLE_REMOTING
11311 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11312 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11313 MonoInst *iargs [5];
11315 GSHAREDVT_FAILURE (op);
11317 iargs [0] = sp [0];
11318 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11319 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11320 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11322 iargs [4] = sp [1];
11324 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11325 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11326 iargs, ip, cfg->real_offset, TRUE);
11327 CHECK_CFG_EXCEPTION;
11328 g_assert (costs > 0);
11330 cfg->real_offset += 5;
11332 inline_costs += costs;
11334 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11339 MonoInst *store, *wbarrier_ptr_ins = NULL;
11341 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11343 if (ins_flag & MONO_INST_VOLATILE) {
11344 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11345 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11348 if (mini_is_gsharedvt_klass (klass)) {
11349 MonoInst *offset_ins;
11351 context_used = mini_class_check_context_used (cfg, klass);
11353 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11354 /* The value is offset by 1 */
11355 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11356 dreg = alloc_ireg_mp (cfg);
11357 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11358 wbarrier_ptr_ins = ins;
11359 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11360 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11362 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11364 if (sp [0]->opcode != OP_LDADDR)
11365 store->flags |= MONO_INST_FAULT;
11367 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11368 if (mini_is_gsharedvt_klass (klass)) {
11369 g_assert (wbarrier_ptr_ins);
11370 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11372 /* insert call to write barrier */
11376 dreg = alloc_ireg_mp (cfg);
11377 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11378 emit_write_barrier (cfg, ptr, sp [1]);
11382 store->flags |= ins_flag;
11389 #ifndef DISABLE_REMOTING
11390 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11391 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11392 MonoInst *iargs [4];
11394 GSHAREDVT_FAILURE (op);
11396 iargs [0] = sp [0];
11397 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11398 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11399 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11400 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11401 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11402 iargs, ip, cfg->real_offset, TRUE);
11403 CHECK_CFG_EXCEPTION;
11404 g_assert (costs > 0);
11406 cfg->real_offset += 5;
11410 inline_costs += costs;
11412 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11418 if (sp [0]->type == STACK_VTYPE) {
11421 /* Have to compute the address of the variable */
11423 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11425 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11427 g_assert (var->klass == klass);
11429 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11433 if (op == CEE_LDFLDA) {
11434 if (sp [0]->type == STACK_OBJ) {
11435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11436 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11439 dreg = alloc_ireg_mp (cfg);
11441 if (mini_is_gsharedvt_klass (klass)) {
11442 MonoInst *offset_ins;
11444 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11445 /* The value is offset by 1 */
11446 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11447 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11449 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11451 ins->klass = mono_class_from_mono_type (field->type);
11452 ins->type = STACK_MP;
11457 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11459 if (mini_is_gsharedvt_klass (klass)) {
11460 MonoInst *offset_ins;
11462 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11463 /* The value is offset by 1 */
11464 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11465 dreg = alloc_ireg_mp (cfg);
11466 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11467 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11469 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11471 load->flags |= ins_flag;
11472 if (sp [0]->opcode != OP_LDADDR)
11473 load->flags |= MONO_INST_FAULT;
11485 context_used = mini_class_check_context_used (cfg, klass);
11487 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11488 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11492 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11493 * to be called here.
11495 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11496 mono_class_vtable (cfg->domain, klass);
11497 CHECK_TYPELOAD (klass);
11499 mono_domain_lock (cfg->domain);
11500 if (cfg->domain->special_static_fields)
11501 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11502 mono_domain_unlock (cfg->domain);
11504 is_special_static = mono_class_field_is_special_static (field);
11506 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11507 thread_ins = mono_get_thread_intrinsic (cfg);
11511 /* Generate IR to compute the field address */
11512 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11514 * Fast access to TLS data
11515 * Inline version of get_thread_static_data () in
11519 int idx, static_data_reg, array_reg, dreg;
11521 GSHAREDVT_FAILURE (op);
11523 MONO_ADD_INS (cfg->cbb, thread_ins);
11524 static_data_reg = alloc_ireg (cfg);
11525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11527 if (cfg->compile_aot) {
11528 int offset_reg, offset2_reg, idx_reg;
11530 /* For TLS variables, this will return the TLS offset */
11531 EMIT_NEW_SFLDACONST (cfg, ins, field);
11532 offset_reg = ins->dreg;
11533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11534 idx_reg = alloc_ireg (cfg);
11535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11537 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11538 array_reg = alloc_ireg (cfg);
11539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11540 offset2_reg = alloc_ireg (cfg);
11541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11543 dreg = alloc_ireg (cfg);
11544 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11546 offset = (gsize)addr & 0x7fffffff;
11547 idx = offset & 0x3f;
11549 array_reg = alloc_ireg (cfg);
11550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11551 dreg = alloc_ireg (cfg);
11552 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11554 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11555 (cfg->compile_aot && is_special_static) ||
11556 (context_used && is_special_static)) {
11557 MonoInst *iargs [2];
11559 g_assert (field->parent);
11560 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11561 if (context_used) {
11562 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11563 field, MONO_RGCTX_INFO_CLASS_FIELD);
11565 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11567 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11568 } else if (context_used) {
11569 MonoInst *static_data;
11572 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11573 method->klass->name_space, method->klass->name, method->name,
11574 depth, field->offset);
11577 if (mono_class_needs_cctor_run (klass, method))
11578 emit_class_init (cfg, klass);
11581 * The pointer we're computing here is
11583 * super_info.static_data + field->offset
11585 static_data = emit_get_rgctx_klass (cfg, context_used,
11586 klass, MONO_RGCTX_INFO_STATIC_DATA);
11588 if (mini_is_gsharedvt_klass (klass)) {
11589 MonoInst *offset_ins;
11591 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11592 /* The value is offset by 1 */
11593 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11594 dreg = alloc_ireg_mp (cfg);
11595 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11596 } else if (field->offset == 0) {
11599 int addr_reg = mono_alloc_preg (cfg);
11600 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11602 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11603 MonoInst *iargs [2];
11605 g_assert (field->parent);
11606 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11607 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11608 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11610 MonoVTable *vtable = NULL;
11612 if (!cfg->compile_aot)
11613 vtable = mono_class_vtable (cfg->domain, klass);
11614 CHECK_TYPELOAD (klass);
11617 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11618 if (!(g_slist_find (class_inits, klass))) {
11619 emit_class_init (cfg, klass);
11620 if (cfg->verbose_level > 2)
11621 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11622 class_inits = g_slist_prepend (class_inits, klass);
11625 if (cfg->run_cctors) {
11626 /* This makes it so that inlining cannot trigger */
11627 /* .cctors: too many apps depend on them */
11628 /* running with a specific order... */
11630 if (! vtable->initialized)
11631 INLINE_FAILURE ("class init");
11632 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11633 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11634 goto exception_exit;
11638 if (cfg->compile_aot)
11639 EMIT_NEW_SFLDACONST (cfg, ins, field);
11642 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11644 EMIT_NEW_PCONST (cfg, ins, addr);
11647 MonoInst *iargs [1];
11648 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11649 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11653 /* Generate IR to do the actual load/store operation */
11655 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11656 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11657 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11660 if (op == CEE_LDSFLDA) {
11661 ins->klass = mono_class_from_mono_type (ftype);
11662 ins->type = STACK_PTR;
11664 } else if (op == CEE_STSFLD) {
11667 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11668 store->flags |= ins_flag;
11670 gboolean is_const = FALSE;
11671 MonoVTable *vtable = NULL;
11672 gpointer addr = NULL;
11674 if (!context_used) {
11675 vtable = mono_class_vtable (cfg->domain, klass);
11676 CHECK_TYPELOAD (klass);
11678 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11679 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11680 int ro_type = ftype->type;
11682 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11683 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11684 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11687 GSHAREDVT_FAILURE (op);
11689 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11692 case MONO_TYPE_BOOLEAN:
11694 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11698 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11701 case MONO_TYPE_CHAR:
11703 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11707 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11712 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11716 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11721 case MONO_TYPE_PTR:
11722 case MONO_TYPE_FNPTR:
11723 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11724 type_to_eval_stack_type ((cfg), field->type, *sp);
11727 case MONO_TYPE_STRING:
11728 case MONO_TYPE_OBJECT:
11729 case MONO_TYPE_CLASS:
11730 case MONO_TYPE_SZARRAY:
11731 case MONO_TYPE_ARRAY:
11732 if (!mono_gc_is_moving ()) {
11733 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11734 type_to_eval_stack_type ((cfg), field->type, *sp);
11742 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11747 case MONO_TYPE_VALUETYPE:
11757 CHECK_STACK_OVF (1);
11759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11760 load->flags |= ins_flag;
11766 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11767 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11768 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11779 token = read32 (ip + 1);
11780 klass = mini_get_class (method, token, generic_context);
11781 CHECK_TYPELOAD (klass);
11782 if (ins_flag & MONO_INST_VOLATILE) {
11783 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11784 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11786 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11787 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11788 ins->flags |= ins_flag;
11789 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11790 generic_class_is_reference_type (cfg, klass)) {
11791 /* insert call to write barrier */
11792 emit_write_barrier (cfg, sp [0], sp [1]);
11804 const char *data_ptr;
11806 guint32 field_token;
11812 token = read32 (ip + 1);
11814 klass = mini_get_class (method, token, generic_context);
11815 CHECK_TYPELOAD (klass);
11817 context_used = mini_class_check_context_used (cfg, klass);
11819 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11820 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11821 ins->sreg1 = sp [0]->dreg;
11822 ins->type = STACK_I4;
11823 ins->dreg = alloc_ireg (cfg);
11824 MONO_ADD_INS (cfg->cbb, ins);
11825 *sp = mono_decompose_opcode (cfg, ins);
11828 if (context_used) {
11829 MonoInst *args [3];
11830 MonoClass *array_class = mono_array_class_get (klass, 1);
11831 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11833 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11836 args [0] = emit_get_rgctx_klass (cfg, context_used,
11837 array_class, MONO_RGCTX_INFO_VTABLE);
11842 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11844 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11846 if (cfg->opt & MONO_OPT_SHARED) {
11847 /* Decompose now to avoid problems with references to the domainvar */
11848 MonoInst *iargs [3];
11850 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11851 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11852 iargs [2] = sp [0];
11854 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11856 /* Decompose later since it is needed by abcrem */
11857 MonoClass *array_type = mono_array_class_get (klass, 1);
11858 mono_class_vtable (cfg->domain, array_type);
11859 CHECK_TYPELOAD (array_type);
11861 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11862 ins->dreg = alloc_ireg_ref (cfg);
11863 ins->sreg1 = sp [0]->dreg;
11864 ins->inst_newa_class = klass;
11865 ins->type = STACK_OBJ;
11866 ins->klass = array_type;
11867 MONO_ADD_INS (cfg->cbb, ins);
11868 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11869 cfg->cbb->has_array_access = TRUE;
11871 /* Needed so mono_emit_load_get_addr () gets called */
11872 mono_get_got_var (cfg);
11882 * we inline/optimize the initialization sequence if possible.
11883 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11884 * for small sizes open code the memcpy
11885 * ensure the rva field is big enough
11887 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11888 MonoMethod *memcpy_method = get_memcpy_method ();
11889 MonoInst *iargs [3];
11890 int add_reg = alloc_ireg_mp (cfg);
11892 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11893 if (cfg->compile_aot) {
11894 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11896 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11898 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11899 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11908 if (sp [0]->type != STACK_OBJ)
11911 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11912 ins->dreg = alloc_preg (cfg);
11913 ins->sreg1 = sp [0]->dreg;
11914 ins->type = STACK_I4;
11915 /* This flag will be inherited by the decomposition */
11916 ins->flags |= MONO_INST_FAULT;
11917 MONO_ADD_INS (cfg->cbb, ins);
11918 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11919 cfg->cbb->has_array_access = TRUE;
11927 if (sp [0]->type != STACK_OBJ)
11930 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11932 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11933 CHECK_TYPELOAD (klass);
11934 /* we need to make sure that this array is exactly the type it needs
11935 * to be for correctness. the wrappers are lax with their usage
11936 * so we need to ignore them here
11938 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11939 MonoClass *array_class = mono_array_class_get (klass, 1);
11940 mini_emit_check_array_type (cfg, sp [0], array_class);
11941 CHECK_TYPELOAD (array_class);
11945 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11950 case CEE_LDELEM_I1:
11951 case CEE_LDELEM_U1:
11952 case CEE_LDELEM_I2:
11953 case CEE_LDELEM_U2:
11954 case CEE_LDELEM_I4:
11955 case CEE_LDELEM_U4:
11956 case CEE_LDELEM_I8:
11958 case CEE_LDELEM_R4:
11959 case CEE_LDELEM_R8:
11960 case CEE_LDELEM_REF: {
11966 if (*ip == CEE_LDELEM) {
11968 token = read32 (ip + 1);
11969 klass = mini_get_class (method, token, generic_context);
11970 CHECK_TYPELOAD (klass);
11971 mono_class_init (klass);
11974 klass = array_access_to_klass (*ip);
11976 if (sp [0]->type != STACK_OBJ)
11979 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11981 if (mini_is_gsharedvt_variable_klass (klass)) {
11982 // FIXME-VT: OP_ICONST optimization
11983 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11985 ins->opcode = OP_LOADV_MEMBASE;
11986 } else if (sp [1]->opcode == OP_ICONST) {
11987 int array_reg = sp [0]->dreg;
11988 int index_reg = sp [1]->dreg;
11989 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11991 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11992 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11994 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11997 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11998 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12001 if (*ip == CEE_LDELEM)
12008 case CEE_STELEM_I1:
12009 case CEE_STELEM_I2:
12010 case CEE_STELEM_I4:
12011 case CEE_STELEM_I8:
12012 case CEE_STELEM_R4:
12013 case CEE_STELEM_R8:
12014 case CEE_STELEM_REF:
12019 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12021 if (*ip == CEE_STELEM) {
12023 token = read32 (ip + 1);
12024 klass = mini_get_class (method, token, generic_context);
12025 CHECK_TYPELOAD (klass);
12026 mono_class_init (klass);
12029 klass = array_access_to_klass (*ip);
12031 if (sp [0]->type != STACK_OBJ)
12034 emit_array_store (cfg, klass, sp, TRUE);
12036 if (*ip == CEE_STELEM)
12043 case CEE_CKFINITE: {
12047 if (cfg->llvm_only) {
12048 MonoInst *iargs [1];
12050 iargs [0] = sp [0];
12051 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12053 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12054 ins->sreg1 = sp [0]->dreg;
12055 ins->dreg = alloc_freg (cfg);
12056 ins->type = STACK_R8;
12057 MONO_ADD_INS (cfg->cbb, ins);
12059 *sp++ = mono_decompose_opcode (cfg, ins);
12065 case CEE_REFANYVAL: {
12066 MonoInst *src_var, *src;
12068 int klass_reg = alloc_preg (cfg);
12069 int dreg = alloc_preg (cfg);
12071 GSHAREDVT_FAILURE (*ip);
12074 MONO_INST_NEW (cfg, ins, *ip);
12077 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12078 CHECK_TYPELOAD (klass);
12080 context_used = mini_class_check_context_used (cfg, klass);
12083 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12085 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12086 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12089 if (context_used) {
12090 MonoInst *klass_ins;
12092 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12093 klass, MONO_RGCTX_INFO_KLASS);
12096 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12097 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12099 mini_emit_class_check (cfg, klass_reg, klass);
12101 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12102 ins->type = STACK_MP;
12103 ins->klass = klass;
12108 case CEE_MKREFANY: {
12109 MonoInst *loc, *addr;
12111 GSHAREDVT_FAILURE (*ip);
12114 MONO_INST_NEW (cfg, ins, *ip);
12117 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12118 CHECK_TYPELOAD (klass);
12120 context_used = mini_class_check_context_used (cfg, klass);
12122 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12123 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12125 if (context_used) {
12126 MonoInst *const_ins;
12127 int type_reg = alloc_preg (cfg);
12129 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12133 } else if (cfg->compile_aot) {
12134 int const_reg = alloc_preg (cfg);
12135 int type_reg = alloc_preg (cfg);
12137 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12138 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12142 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12143 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12147 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12148 ins->type = STACK_VTYPE;
12149 ins->klass = mono_defaults.typed_reference_class;
12154 case CEE_LDTOKEN: {
12156 MonoClass *handle_class;
12158 CHECK_STACK_OVF (1);
12161 n = read32 (ip + 1);
12163 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12164 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12165 handle = mono_method_get_wrapper_data (method, n);
12166 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12167 if (handle_class == mono_defaults.typehandle_class)
12168 handle = &((MonoClass*)handle)->byval_arg;
12171 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12176 mono_class_init (handle_class);
12177 if (cfg->gshared) {
12178 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12179 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12180 /* This case handles ldtoken
12181 of an open type, like for
12184 } else if (handle_class == mono_defaults.typehandle_class) {
12185 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12186 } else if (handle_class == mono_defaults.fieldhandle_class)
12187 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12188 else if (handle_class == mono_defaults.methodhandle_class)
12189 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12191 g_assert_not_reached ();
12194 if ((cfg->opt & MONO_OPT_SHARED) &&
12195 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12196 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12197 MonoInst *addr, *vtvar, *iargs [3];
12198 int method_context_used;
12200 method_context_used = mini_method_check_context_used (cfg, method);
12202 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12204 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12205 EMIT_NEW_ICONST (cfg, iargs [1], n);
12206 if (method_context_used) {
12207 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12208 method, MONO_RGCTX_INFO_METHOD);
12209 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12211 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12212 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12214 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12216 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12218 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12220 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12221 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12222 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12223 (cmethod->klass == mono_defaults.systemtype_class) &&
12224 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12225 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12227 mono_class_init (tclass);
12228 if (context_used) {
12229 ins = emit_get_rgctx_klass (cfg, context_used,
12230 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12231 } else if (cfg->compile_aot) {
12232 if (method->wrapper_type) {
12233 mono_error_init (&error); //got to do it since there are multiple conditionals below
12234 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12235 /* Special case for static synchronized wrappers */
12236 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12238 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12239 /* FIXME: n is not a normal token */
12241 EMIT_NEW_PCONST (cfg, ins, NULL);
12244 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12247 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12249 EMIT_NEW_PCONST (cfg, ins, rt);
12251 ins->type = STACK_OBJ;
12252 ins->klass = cmethod->klass;
12255 MonoInst *addr, *vtvar;
12257 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12259 if (context_used) {
12260 if (handle_class == mono_defaults.typehandle_class) {
12261 ins = emit_get_rgctx_klass (cfg, context_used,
12262 mono_class_from_mono_type ((MonoType *)handle),
12263 MONO_RGCTX_INFO_TYPE);
12264 } else if (handle_class == mono_defaults.methodhandle_class) {
12265 ins = emit_get_rgctx_method (cfg, context_used,
12266 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12267 } else if (handle_class == mono_defaults.fieldhandle_class) {
12268 ins = emit_get_rgctx_field (cfg, context_used,
12269 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12271 g_assert_not_reached ();
12273 } else if (cfg->compile_aot) {
12274 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12276 EMIT_NEW_PCONST (cfg, ins, handle);
12278 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12279 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12280 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12290 if (sp [-1]->type != STACK_OBJ)
12293 MONO_INST_NEW (cfg, ins, OP_THROW);
12295 ins->sreg1 = sp [0]->dreg;
12297 cfg->cbb->out_of_line = TRUE;
12298 MONO_ADD_INS (cfg->cbb, ins);
12299 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12300 MONO_ADD_INS (cfg->cbb, ins);
12303 link_bblock (cfg, cfg->cbb, end_bblock);
12304 start_new_bblock = 1;
12305 /* This can complicate code generation for llvm since the return value might not be defined */
12306 if (COMPILE_LLVM (cfg))
12307 INLINE_FAILURE ("throw");
12309 case CEE_ENDFINALLY:
12310 if (!ip_in_finally_clause (cfg, ip - header->code))
12312 /* mono_save_seq_point_info () depends on this */
12313 if (sp != stack_start)
12314 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12315 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12316 MONO_ADD_INS (cfg->cbb, ins);
12318 start_new_bblock = 1;
12321 * Control will leave the method so empty the stack, otherwise
12322 * the next basic block will start with a nonempty stack.
12324 while (sp != stack_start) {
12329 case CEE_LEAVE_S: {
12332 if (*ip == CEE_LEAVE) {
12334 target = ip + 5 + (gint32)read32(ip + 1);
12337 target = ip + 2 + (signed char)(ip [1]);
12340 /* empty the stack */
12341 while (sp != stack_start) {
12346 * If this leave statement is in a catch block, check for a
12347 * pending exception, and rethrow it if necessary.
12348 * We avoid doing this in runtime invoke wrappers, since those are called
12349 * by native code which expects the wrapper to catch all exceptions.
12351 for (i = 0; i < header->num_clauses; ++i) {
12352 MonoExceptionClause *clause = &header->clauses [i];
12355 * Use <= in the final comparison to handle clauses with multiple
12356 * leave statements, like in bug #78024.
12357 * The ordering of the exception clauses guarantees that we find the
12358 * innermost clause.
12360 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12362 MonoBasicBlock *dont_throw;
12367 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12370 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12372 NEW_BBLOCK (cfg, dont_throw);
12375 * Currently, we always rethrow the abort exception, despite the
12376 * fact that this is not correct. See thread6.cs for an example.
12377 * But propagating the abort exception is more important than
12378 * getting the semantics right.
12380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12381 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12382 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12384 MONO_START_BB (cfg, dont_throw);
12389 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12392 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12394 MonoExceptionClause *clause;
12396 for (tmp = handlers; tmp; tmp = tmp->next) {
12397 clause = (MonoExceptionClause *)tmp->data;
12398 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12400 link_bblock (cfg, cfg->cbb, tblock);
12401 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12402 ins->inst_target_bb = tblock;
12403 ins->inst_eh_block = clause;
12404 MONO_ADD_INS (cfg->cbb, ins);
12405 cfg->cbb->has_call_handler = 1;
12406 if (COMPILE_LLVM (cfg)) {
12407 MonoBasicBlock *target_bb;
12410 * Link the finally bblock with the target, since it will
12411 * conceptually branch there.
12413 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12414 GET_BBLOCK (cfg, target_bb, target);
12415 link_bblock (cfg, tblock, target_bb);
12418 g_list_free (handlers);
12421 MONO_INST_NEW (cfg, ins, OP_BR);
12422 MONO_ADD_INS (cfg->cbb, ins);
12423 GET_BBLOCK (cfg, tblock, target);
12424 link_bblock (cfg, cfg->cbb, tblock);
12425 ins->inst_target_bb = tblock;
12427 start_new_bblock = 1;
12429 if (*ip == CEE_LEAVE)
12438 * Mono specific opcodes
12440 case MONO_CUSTOM_PREFIX: {
12442 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12446 case CEE_MONO_ICALL: {
12448 MonoJitICallInfo *info;
12450 token = read32 (ip + 2);
12451 func = mono_method_get_wrapper_data (method, token);
12452 info = mono_find_jit_icall_by_addr (func);
12454 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12457 CHECK_STACK (info->sig->param_count);
12458 sp -= info->sig->param_count;
12460 ins = mono_emit_jit_icall (cfg, info->func, sp);
12461 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12465 inline_costs += 10 * num_calls++;
12469 case CEE_MONO_LDPTR_CARD_TABLE:
12470 case CEE_MONO_LDPTR_NURSERY_START:
12471 case CEE_MONO_LDPTR_NURSERY_BITS:
12472 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12473 CHECK_STACK_OVF (1);
12476 case CEE_MONO_LDPTR_CARD_TABLE:
12477 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12479 case CEE_MONO_LDPTR_NURSERY_START:
12480 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12482 case CEE_MONO_LDPTR_NURSERY_BITS:
12483 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12485 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12486 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12492 inline_costs += 10 * num_calls++;
12495 case CEE_MONO_LDPTR: {
12498 CHECK_STACK_OVF (1);
12500 token = read32 (ip + 2);
12502 ptr = mono_method_get_wrapper_data (method, token);
12503 EMIT_NEW_PCONST (cfg, ins, ptr);
12506 inline_costs += 10 * num_calls++;
12507 /* Can't embed random pointers into AOT code */
12511 case CEE_MONO_JIT_ICALL_ADDR: {
12512 MonoJitICallInfo *callinfo;
12515 CHECK_STACK_OVF (1);
12517 token = read32 (ip + 2);
12519 ptr = mono_method_get_wrapper_data (method, token);
12520 callinfo = mono_find_jit_icall_by_addr (ptr);
12521 g_assert (callinfo);
12522 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12525 inline_costs += 10 * num_calls++;
12528 case CEE_MONO_ICALL_ADDR: {
12529 MonoMethod *cmethod;
12532 CHECK_STACK_OVF (1);
12534 token = read32 (ip + 2);
12536 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12538 if (cfg->compile_aot) {
12539 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12541 ptr = mono_lookup_internal_call (cmethod);
12543 EMIT_NEW_PCONST (cfg, ins, ptr);
12549 case CEE_MONO_VTADDR: {
12550 MonoInst *src_var, *src;
12556 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12557 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12562 case CEE_MONO_NEWOBJ: {
12563 MonoInst *iargs [2];
12565 CHECK_STACK_OVF (1);
12567 token = read32 (ip + 2);
12568 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12569 mono_class_init (klass);
12570 NEW_DOMAINCONST (cfg, iargs [0]);
12571 MONO_ADD_INS (cfg->cbb, iargs [0]);
12572 NEW_CLASSCONST (cfg, iargs [1], klass);
12573 MONO_ADD_INS (cfg->cbb, iargs [1]);
12574 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12576 inline_costs += 10 * num_calls++;
12579 case CEE_MONO_OBJADDR:
12582 MONO_INST_NEW (cfg, ins, OP_MOVE);
12583 ins->dreg = alloc_ireg_mp (cfg);
12584 ins->sreg1 = sp [0]->dreg;
12585 ins->type = STACK_MP;
12586 MONO_ADD_INS (cfg->cbb, ins);
12590 case CEE_MONO_LDNATIVEOBJ:
12592 * Similar to LDOBJ, but instead load the unmanaged
12593 * representation of the vtype to the stack.
12598 token = read32 (ip + 2);
12599 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12600 g_assert (klass->valuetype);
12601 mono_class_init (klass);
12604 MonoInst *src, *dest, *temp;
12607 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12608 temp->backend.is_pinvoke = 1;
12609 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12610 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12612 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12613 dest->type = STACK_VTYPE;
12614 dest->klass = klass;
12620 case CEE_MONO_RETOBJ: {
12622 * Same as RET, but return the native representation of a vtype
12625 g_assert (cfg->ret);
12626 g_assert (mono_method_signature (method)->pinvoke);
12631 token = read32 (ip + 2);
12632 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12634 if (!cfg->vret_addr) {
12635 g_assert (cfg->ret_var_is_local);
12637 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12639 EMIT_NEW_RETLOADA (cfg, ins);
12641 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12643 if (sp != stack_start)
12646 MONO_INST_NEW (cfg, ins, OP_BR);
12647 ins->inst_target_bb = end_bblock;
12648 MONO_ADD_INS (cfg->cbb, ins);
12649 link_bblock (cfg, cfg->cbb, end_bblock);
12650 start_new_bblock = 1;
12654 case CEE_MONO_CISINST:
12655 case CEE_MONO_CCASTCLASS: {
12660 token = read32 (ip + 2);
12661 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12662 if (ip [1] == CEE_MONO_CISINST)
12663 ins = handle_cisinst (cfg, klass, sp [0]);
12665 ins = handle_ccastclass (cfg, klass, sp [0]);
12670 case CEE_MONO_SAVE_LMF:
12671 case CEE_MONO_RESTORE_LMF:
12674 case CEE_MONO_CLASSCONST:
12675 CHECK_STACK_OVF (1);
12677 token = read32 (ip + 2);
12678 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12681 inline_costs += 10 * num_calls++;
12683 case CEE_MONO_NOT_TAKEN:
12684 cfg->cbb->out_of_line = TRUE;
12687 case CEE_MONO_TLS: {
12690 CHECK_STACK_OVF (1);
12692 key = (MonoTlsKey)read32 (ip + 2);
12693 g_assert (key < TLS_KEY_NUM);
12695 ins = mono_create_tls_get (cfg, key);
12697 if (cfg->compile_aot) {
12699 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12700 ins->dreg = alloc_preg (cfg);
12701 ins->type = STACK_PTR;
12703 g_assert_not_reached ();
12706 ins->type = STACK_PTR;
12707 MONO_ADD_INS (cfg->cbb, ins);
12712 case CEE_MONO_DYN_CALL: {
12713 MonoCallInst *call;
12715 /* It would be easier to call a trampoline, but that would put an
12716 * extra frame on the stack, confusing exception handling. So
12717 * implement it inline using an opcode for now.
12720 if (!cfg->dyn_call_var) {
12721 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12722 /* prevent it from being register allocated */
12723 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12726 /* Has to use a call inst since the local regalloc expects it */
12727 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12728 ins = (MonoInst*)call;
12730 ins->sreg1 = sp [0]->dreg;
12731 ins->sreg2 = sp [1]->dreg;
12732 MONO_ADD_INS (cfg->cbb, ins);
12734 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12737 inline_costs += 10 * num_calls++;
12741 case CEE_MONO_MEMORY_BARRIER: {
12743 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12747 case CEE_MONO_ATOMIC_STORE_I4: {
12748 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12754 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12755 ins->dreg = sp [0]->dreg;
12756 ins->sreg1 = sp [1]->dreg;
12757 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12758 MONO_ADD_INS (cfg->cbb, ins);
12763 case CEE_MONO_JIT_ATTACH: {
12764 MonoInst *args [16], *domain_ins;
12765 MonoInst *ad_ins, *jit_tls_ins;
12766 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12768 g_assert (!mono_threads_is_coop_enabled ());
12770 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12772 EMIT_NEW_PCONST (cfg, ins, NULL);
12773 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12775 ad_ins = mono_get_domain_intrinsic (cfg);
12776 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12778 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12779 NEW_BBLOCK (cfg, next_bb);
12780 NEW_BBLOCK (cfg, call_bb);
12782 if (cfg->compile_aot) {
12783 /* AOT code is only used in the root domain */
12784 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12786 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12788 MONO_ADD_INS (cfg->cbb, ad_ins);
12789 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12792 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12793 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12794 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12797 MONO_START_BB (cfg, call_bb);
12800 /* AOT code is only used in the root domain */
12801 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12802 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12803 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12806 MONO_START_BB (cfg, next_bb);
12812 case CEE_MONO_JIT_DETACH: {
12813 MonoInst *args [16];
12815 /* Restore the original domain */
12816 dreg = alloc_ireg (cfg);
12817 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12818 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12822 case CEE_MONO_CALLI_EXTRA_ARG: {
12824 MonoMethodSignature *fsig;
12828 * This is the same as CEE_CALLI, but passes an additional argument
12829 * to the called method in llvmonly mode.
12830 * This is only used by delegate invoke wrappers to call the
12831 * actual delegate method.
12833 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12836 token = read32 (ip + 2);
12844 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12847 if (cfg->llvm_only)
12848 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12850 n = fsig->param_count + fsig->hasthis + 1;
12857 if (cfg->llvm_only) {
12859 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12860 * cconv. This is set by mono_init_delegate ().
12862 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12863 MonoInst *callee = addr;
12864 MonoInst *call, *localloc_ins;
12865 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12866 int low_bit_reg = alloc_preg (cfg);
12868 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12869 NEW_BBLOCK (cfg, end_bb);
12871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12875 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12876 addr = emit_get_rgctx_sig (cfg, context_used,
12877 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12879 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12881 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12882 ins->dreg = alloc_preg (cfg);
12883 ins->inst_imm = 2 * SIZEOF_VOID_P;
12884 MONO_ADD_INS (cfg->cbb, ins);
12885 localloc_ins = ins;
12886 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12890 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12891 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12893 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12894 MONO_START_BB (cfg, is_gsharedvt_bb);
12895 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12896 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12897 ins->dreg = call->dreg;
12899 MONO_START_BB (cfg, end_bb);
12901 /* Caller uses a normal calling conv */
12903 MonoInst *callee = addr;
12904 MonoInst *call, *localloc_ins;
12905 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12906 int low_bit_reg = alloc_preg (cfg);
12908 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12909 NEW_BBLOCK (cfg, end_bb);
12911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12913 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12915 /* Normal case: callee uses a normal cconv, no conversion is needed */
12916 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12918 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12919 MONO_START_BB (cfg, is_gsharedvt_bb);
12920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12921 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12922 MONO_ADD_INS (cfg->cbb, addr);
12924 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12926 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12927 ins->dreg = alloc_preg (cfg);
12928 ins->inst_imm = 2 * SIZEOF_VOID_P;
12929 MONO_ADD_INS (cfg->cbb, ins);
12930 localloc_ins = ins;
12931 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12932 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12935 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12936 ins->dreg = call->dreg;
12937 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12939 MONO_START_BB (cfg, end_bb);
12942 /* Same as CEE_CALLI */
12943 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12945 * We pass the address to the gsharedvt trampoline in the rgctx reg
12947 MonoInst *callee = addr;
12949 addr = emit_get_rgctx_sig (cfg, context_used,
12950 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12951 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12953 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12957 if (!MONO_TYPE_IS_VOID (fsig->ret))
12958 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12960 CHECK_CFG_EXCEPTION;
12964 constrained_class = NULL;
12967 case CEE_MONO_LDDOMAIN:
12968 CHECK_STACK_OVF (1);
12969 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12973 case CEE_MONO_GET_LAST_ERROR:
12975 CHECK_STACK_OVF (1);
12977 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12978 ins->dreg = alloc_dreg (cfg, STACK_I4);
12979 ins->type = STACK_I4;
12980 MONO_ADD_INS (cfg->cbb, ins);
12986 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12992 case CEE_PREFIX1: {
12995 case CEE_ARGLIST: {
12996 /* somewhat similar to LDTOKEN */
12997 MonoInst *addr, *vtvar;
12998 CHECK_STACK_OVF (1);
12999 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
13001 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
13002 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
13004 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
13005 ins->type = STACK_VTYPE;
13006 ins->klass = mono_defaults.argumenthandle_class;
13016 MonoInst *cmp, *arg1, *arg2;
13024 * The following transforms:
13025 * CEE_CEQ into OP_CEQ
13026 * CEE_CGT into OP_CGT
13027 * CEE_CGT_UN into OP_CGT_UN
13028 * CEE_CLT into OP_CLT
13029 * CEE_CLT_UN into OP_CLT_UN
13031 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13033 MONO_INST_NEW (cfg, ins, cmp->opcode);
13034 cmp->sreg1 = arg1->dreg;
13035 cmp->sreg2 = arg2->dreg;
13036 type_from_op (cfg, cmp, arg1, arg2);
13038 add_widen_op (cfg, cmp, &arg1, &arg2);
13039 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13040 cmp->opcode = OP_LCOMPARE;
13041 else if (arg1->type == STACK_R4)
13042 cmp->opcode = OP_RCOMPARE;
13043 else if (arg1->type == STACK_R8)
13044 cmp->opcode = OP_FCOMPARE;
13046 cmp->opcode = OP_ICOMPARE;
13047 MONO_ADD_INS (cfg->cbb, cmp);
13048 ins->type = STACK_I4;
13049 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13050 type_from_op (cfg, ins, arg1, arg2);
13052 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13054 * The backends expect the fceq opcodes to do the
13057 ins->sreg1 = cmp->sreg1;
13058 ins->sreg2 = cmp->sreg2;
13061 MONO_ADD_INS (cfg->cbb, ins);
13067 MonoInst *argconst;
13068 MonoMethod *cil_method;
13070 CHECK_STACK_OVF (1);
13072 n = read32 (ip + 2);
13073 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13076 mono_class_init (cmethod->klass);
13078 mono_save_token_info (cfg, image, n, cmethod);
13080 context_used = mini_method_check_context_used (cfg, cmethod);
13082 cil_method = cmethod;
13083 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13084 emit_method_access_failure (cfg, method, cil_method);
13086 if (mono_security_core_clr_enabled ())
13087 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13090 * Optimize the common case of ldftn+delegate creation
13092 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13093 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13094 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13095 MonoInst *target_ins, *handle_ins;
13096 MonoMethod *invoke;
13097 int invoke_context_used;
13099 invoke = mono_get_delegate_invoke (ctor_method->klass);
13100 if (!invoke || !mono_method_signature (invoke))
13103 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13105 target_ins = sp [-1];
13107 if (mono_security_core_clr_enabled ())
13108 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13110 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13111 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13112 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13114 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13118 /* FIXME: SGEN support */
13119 if (invoke_context_used == 0 || cfg->llvm_only) {
13121 if (cfg->verbose_level > 3)
13122 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13123 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13126 CHECK_CFG_EXCEPTION;
13136 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13137 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13141 inline_costs += 10 * num_calls++;
13144 case CEE_LDVIRTFTN: {
13145 MonoInst *args [2];
13149 n = read32 (ip + 2);
13150 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13153 mono_class_init (cmethod->klass);
13155 context_used = mini_method_check_context_used (cfg, cmethod);
13157 if (mono_security_core_clr_enabled ())
13158 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13161 * Optimize the common case of ldvirtftn+delegate creation
13163 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13164 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13165 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13166 MonoInst *target_ins, *handle_ins;
13167 MonoMethod *invoke;
13168 int invoke_context_used;
13169 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13171 invoke = mono_get_delegate_invoke (ctor_method->klass);
13172 if (!invoke || !mono_method_signature (invoke))
13175 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13177 target_ins = sp [-1];
13179 if (mono_security_core_clr_enabled ())
13180 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13182 /* FIXME: SGEN support */
13183 if (invoke_context_used == 0 || cfg->llvm_only) {
13185 if (cfg->verbose_level > 3)
13186 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13187 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13190 CHECK_CFG_EXCEPTION;
13203 args [1] = emit_get_rgctx_method (cfg, context_used,
13204 cmethod, MONO_RGCTX_INFO_METHOD);
13207 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13209 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13212 inline_costs += 10 * num_calls++;
13216 CHECK_STACK_OVF (1);
13218 n = read16 (ip + 2);
13220 EMIT_NEW_ARGLOAD (cfg, ins, n);
13225 CHECK_STACK_OVF (1);
13227 n = read16 (ip + 2);
13229 NEW_ARGLOADA (cfg, ins, n);
13230 MONO_ADD_INS (cfg->cbb, ins);
13238 n = read16 (ip + 2);
13240 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13242 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13246 CHECK_STACK_OVF (1);
13248 n = read16 (ip + 2);
13250 EMIT_NEW_LOCLOAD (cfg, ins, n);
13255 unsigned char *tmp_ip;
13256 CHECK_STACK_OVF (1);
13258 n = read16 (ip + 2);
13261 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13267 EMIT_NEW_LOCLOADA (cfg, ins, n);
13276 n = read16 (ip + 2);
13278 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13280 emit_stloc_ir (cfg, sp, header, n);
13284 case CEE_LOCALLOC: {
13286 MonoBasicBlock *non_zero_bb, *end_bb;
13287 int alloc_ptr = alloc_preg (cfg);
13289 if (sp != stack_start)
13291 if (cfg->method != method)
13293 * Inlining this into a loop in a parent could lead to
13294 * stack overflows which is different behavior than the
13295 * non-inlined case, thus disable inlining in this case.
13297 INLINE_FAILURE("localloc");
13299 NEW_BBLOCK (cfg, non_zero_bb);
13300 NEW_BBLOCK (cfg, end_bb);
13302 /* if size != zero */
13303 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
13304 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
13306 //size is zero, so result is NULL
13307 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
13308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
13310 MONO_START_BB (cfg, non_zero_bb);
13311 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13312 ins->dreg = alloc_ptr;
13313 ins->sreg1 = sp [0]->dreg;
13314 ins->type = STACK_PTR;
13315 MONO_ADD_INS (cfg->cbb, ins);
13317 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13319 ins->flags |= MONO_INST_INIT;
13321 MONO_START_BB (cfg, end_bb);
13322 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
13323 ins->type = STACK_PTR;
13329 case CEE_ENDFILTER: {
13330 MonoExceptionClause *clause, *nearest;
13335 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13337 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13338 ins->sreg1 = (*sp)->dreg;
13339 MONO_ADD_INS (cfg->cbb, ins);
13340 start_new_bblock = 1;
13344 for (cc = 0; cc < header->num_clauses; ++cc) {
13345 clause = &header->clauses [cc];
13346 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13347 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13348 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13351 g_assert (nearest);
13352 if ((ip - header->code) != nearest->handler_offset)
13357 case CEE_UNALIGNED_:
13358 ins_flag |= MONO_INST_UNALIGNED;
13359 /* FIXME: record alignment? we can assume 1 for now */
13363 case CEE_VOLATILE_:
13364 ins_flag |= MONO_INST_VOLATILE;
13368 ins_flag |= MONO_INST_TAILCALL;
13369 cfg->flags |= MONO_CFG_HAS_TAIL;
13370 /* Can't inline tail calls at this time */
13371 inline_costs += 100000;
13378 token = read32 (ip + 2);
13379 klass = mini_get_class (method, token, generic_context);
13380 CHECK_TYPELOAD (klass);
13381 if (generic_class_is_reference_type (cfg, klass))
13382 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13384 mini_emit_initobj (cfg, *sp, NULL, klass);
13388 case CEE_CONSTRAINED_:
13390 token = read32 (ip + 2);
13391 constrained_class = mini_get_class (method, token, generic_context);
13392 CHECK_TYPELOAD (constrained_class);
13396 case CEE_INITBLK: {
13397 MonoInst *iargs [3];
13401 /* Skip optimized paths for volatile operations. */
13402 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13403 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13404 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13405 /* emit_memset only works when val == 0 */
13406 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13409 iargs [0] = sp [0];
13410 iargs [1] = sp [1];
13411 iargs [2] = sp [2];
13412 if (ip [1] == CEE_CPBLK) {
13414 * FIXME: It's unclear whether we should be emitting both the acquire
13415 * and release barriers for cpblk. It is technically both a load and
13416 * store operation, so it seems like that's the sensible thing to do.
13418 * FIXME: We emit full barriers on both sides of the operation for
13419 * simplicity. We should have a separate atomic memcpy method instead.
13421 MonoMethod *memcpy_method = get_memcpy_method ();
13423 if (ins_flag & MONO_INST_VOLATILE)
13424 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13426 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13427 call->flags |= ins_flag;
13429 if (ins_flag & MONO_INST_VOLATILE)
13430 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13432 MonoMethod *memset_method = get_memset_method ();
13433 if (ins_flag & MONO_INST_VOLATILE) {
13434 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13435 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13437 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13438 call->flags |= ins_flag;
13449 ins_flag |= MONO_INST_NOTYPECHECK;
13451 ins_flag |= MONO_INST_NORANGECHECK;
13452 /* we ignore the no-nullcheck for now since we
13453 * really do it explicitly only when doing callvirt->call
13457 case CEE_RETHROW: {
13459 int handler_offset = -1;
13461 for (i = 0; i < header->num_clauses; ++i) {
13462 MonoExceptionClause *clause = &header->clauses [i];
13463 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13464 handler_offset = clause->handler_offset;
13469 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13471 if (handler_offset == -1)
13474 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13475 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13476 ins->sreg1 = load->dreg;
13477 MONO_ADD_INS (cfg->cbb, ins);
13479 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13480 MONO_ADD_INS (cfg->cbb, ins);
13483 link_bblock (cfg, cfg->cbb, end_bblock);
13484 start_new_bblock = 1;
13492 CHECK_STACK_OVF (1);
13494 token = read32 (ip + 2);
13495 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13496 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13499 val = mono_type_size (type, &ialign);
13501 MonoClass *klass = mini_get_class (method, token, generic_context);
13502 CHECK_TYPELOAD (klass);
13504 val = mono_type_size (&klass->byval_arg, &ialign);
13506 if (mini_is_gsharedvt_klass (klass))
13507 GSHAREDVT_FAILURE (*ip);
13509 EMIT_NEW_ICONST (cfg, ins, val);
13514 case CEE_REFANYTYPE: {
13515 MonoInst *src_var, *src;
13517 GSHAREDVT_FAILURE (*ip);
13523 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13525 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13526 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13527 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13532 case CEE_READONLY_:
13545 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13555 g_warning ("opcode 0x%02x not handled", *ip);
13559 if (start_new_bblock != 1)
13562 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13563 if (cfg->cbb->next_bb) {
13564 /* This could already be set because of inlining, #693905 */
13565 MonoBasicBlock *bb = cfg->cbb;
13567 while (bb->next_bb)
13569 bb->next_bb = end_bblock;
13571 cfg->cbb->next_bb = end_bblock;
13574 if (cfg->method == method && cfg->domainvar) {
13576 MonoInst *get_domain;
13578 cfg->cbb = init_localsbb;
13580 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13581 MONO_ADD_INS (cfg->cbb, get_domain);
13583 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13585 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13586 MONO_ADD_INS (cfg->cbb, store);
13589 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13590 if (cfg->compile_aot)
13591 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13592 mono_get_got_var (cfg);
13595 if (cfg->method == method && cfg->got_var)
13596 mono_emit_load_got_addr (cfg);
13598 if (init_localsbb) {
13599 cfg->cbb = init_localsbb;
13601 for (i = 0; i < header->num_locals; ++i) {
13602 emit_init_local (cfg, i, header->locals [i], init_locals);
13606 if (cfg->init_ref_vars && cfg->method == method) {
13607 /* Emit initialization for ref vars */
13608 // FIXME: Avoid duplication initialization for IL locals.
13609 for (i = 0; i < cfg->num_varinfo; ++i) {
13610 MonoInst *ins = cfg->varinfo [i];
13612 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13613 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13617 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13618 cfg->cbb = init_localsbb;
13619 emit_push_lmf (cfg);
13622 cfg->cbb = init_localsbb;
13623 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13626 MonoBasicBlock *bb;
13629 * Make seq points at backward branch targets interruptable.
13631 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13632 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13633 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13636 /* Add a sequence point for method entry/exit events */
13637 if (seq_points && cfg->gen_sdb_seq_points) {
13638 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13639 MONO_ADD_INS (init_localsbb, ins);
13640 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13641 MONO_ADD_INS (cfg->bb_exit, ins);
13645 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13646 * the code they refer to was dead (#11880).
13648 if (sym_seq_points) {
13649 for (i = 0; i < header->code_size; ++i) {
13650 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13653 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13654 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13661 if (cfg->method == method) {
13662 MonoBasicBlock *bb;
13663 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13664 bb->region = mono_find_block_region (cfg, bb->real_offset);
13666 mono_create_spvar_for_region (cfg, bb->region);
13667 if (cfg->verbose_level > 2)
13668 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13671 MonoBasicBlock *bb;
13672 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13673 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13674 bb->real_offset = inline_offset;
13678 if (inline_costs < 0) {
13681 /* Method is too large */
13682 mname = mono_method_full_name (method, TRUE);
13683 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13687 if ((cfg->verbose_level > 2) && (cfg->method == method))
13688 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13693 g_assert (!mono_error_ok (&cfg->error));
13697 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13701 set_exception_type_from_invalid_il (cfg, method, ip);
13705 g_slist_free (class_inits);
13706 mono_basic_block_free (original_bb);
13707 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13708 if (cfg->exception_type)
13711 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode (OP_STORE*_MEMBASE_REG) to the
 * corresponding immediate-source form (OP_STORE*_MEMBASE_IMM), so a constant
 * can be stored without first loading it into a register.
 *   Any opcode outside the visible cases hits g_assert_not_reached ().
 */
13715 store_membase_reg_to_store_membase_imm (int opcode)
13718 case OP_STORE_MEMBASE_REG:
13719 return OP_STORE_MEMBASE_IMM;
13720 case OP_STOREI1_MEMBASE_REG:
13721 return OP_STOREI1_MEMBASE_IMM;
13722 case OP_STOREI2_MEMBASE_REG:
13723 return OP_STOREI2_MEMBASE_IMM;
13724 case OP_STOREI4_MEMBASE_REG:
13725 return OP_STOREI4_MEMBASE_IMM;
13726 case OP_STOREI8_MEMBASE_REG:
13727 return OP_STOREI8_MEMBASE_IMM;
13729 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-source opcode to its variant taking an immediate second operand
 * (e.g. OP_IADD -> OP_IADD_IMM). Covers int/long ALU ops, compares, membase
 * stores, and some x86/amd64-specific opcodes behind target #ifdefs.
 *   NOTE(review): the case labels and the default branch are not visible in
 * this decimated listing; presumably the default returns -1 for opcodes with
 * no immediate form — confirm against upstream.
 */
13736 mono_op_to_op_imm (int opcode)
13740 return OP_IADD_IMM;
13742 return OP_ISUB_IMM;
13744 return OP_IDIV_IMM;
13746 return OP_IDIV_UN_IMM;
13748 return OP_IREM_IMM;
13750 return OP_IREM_UN_IMM;
13752 return OP_IMUL_IMM;
13754 return OP_IAND_IMM;
13758 return OP_IXOR_IMM;
13760 return OP_ISHL_IMM;
13762 return OP_ISHR_IMM;
13764 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU ops */
13767 return OP_LADD_IMM;
13769 return OP_LSUB_IMM;
13771 return OP_LAND_IMM;
13775 return OP_LXOR_IMM;
13777 return OP_LSHL_IMM;
13779 return OP_LSHR_IMM;
13781 return OP_LSHR_UN_IMM;
/* LREM_IMM only exists on 64-bit register targets */
13782 #if SIZEOF_REGISTER == 8
13784 return OP_LREM_IMM;
13788 return OP_COMPARE_IMM;
13790 return OP_ICOMPARE_IMM;
13792 return OP_LCOMPARE_IMM;
/* membase stores: register source -> immediate source */
13794 case OP_STORE_MEMBASE_REG:
13795 return OP_STORE_MEMBASE_IMM;
13796 case OP_STOREI1_MEMBASE_REG:
13797 return OP_STOREI1_MEMBASE_IMM;
13798 case OP_STOREI2_MEMBASE_REG:
13799 return OP_STOREI2_MEMBASE_IMM;
13800 case OP_STOREI4_MEMBASE_REG:
13801 return OP_STOREI4_MEMBASE_IMM;
/* target-specific opcodes */
13803 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13805 return OP_X86_PUSH_IMM;
13806 case OP_X86_COMPARE_MEMBASE_REG:
13807 return OP_X86_COMPARE_MEMBASE_IMM;
13809 #if defined(TARGET_AMD64)
13810 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13811 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13813 case OP_VOIDCALL_REG:
13814 return OP_VOIDCALL;
13822 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the matching machine-level
 * membase load (OP_LOAD*_MEMBASE). Both CEE_LDIND_I (native int) and
 * CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE.
 *   Opcodes outside the visible cases hit g_assert_not_reached ().
 */
13829 ldind_to_load_membase (int opcode)
13833 return OP_LOADI1_MEMBASE;
13835 return OP_LOADU1_MEMBASE;
13837 return OP_LOADI2_MEMBASE;
13839 return OP_LOADU2_MEMBASE;
13841 return OP_LOADI4_MEMBASE;
13843 return OP_LOADU4_MEMBASE;
13845 return OP_LOAD_MEMBASE;
13846 case CEE_LDIND_REF:
13847 return OP_LOAD_MEMBASE;
13849 return OP_LOADI8_MEMBASE;
13851 return OP_LOADR4_MEMBASE;
13853 return OP_LOADR8_MEMBASE;
13855 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the matching machine-level
 * membase store (OP_STORE*_MEMBASE_REG). CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.
 *   Opcodes outside the visible cases hit g_assert_not_reached ().
 */
13862 stind_to_store_membase (int opcode)
13866 return OP_STOREI1_MEMBASE_REG;
13868 return OP_STOREI2_MEMBASE_REG;
13870 return OP_STOREI4_MEMBASE_REG;
13872 case CEE_STIND_REF:
13873 return OP_STORE_MEMBASE_REG;
13875 return OP_STOREI8_MEMBASE_REG;
13877 return OP_STORER4_MEMBASE_REG;
13879 return OP_STORER8_MEMBASE_REG;
13881 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), which only x86/amd64 provide here (see the FIXME about a
 * MONO_ARCH_HAVE_LOAD_MEM capability macro). The 8-byte variant is gated on
 * 64-bit registers.
 */
13888 mono_load_membase_to_load_mem (int opcode)
13890 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13891 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13893 case OP_LOAD_MEMBASE:
13894 return OP_LOAD_MEM;
13895 case OP_LOADU1_MEMBASE:
13896 return OP_LOADU1_MEM;
13897 case OP_LOADU2_MEMBASE:
13898 return OP_LOADU2_MEM;
13899 case OP_LOADI4_MEMBASE:
13900 return OP_LOADI4_MEM;
13901 case OP_LOADU4_MEMBASE:
13902 return OP_LOADU4_MEM;
13903 #if SIZEOF_REGISTER == 8
13904 case OP_LOADI8_MEMBASE:
13905 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode and an ALU opcode, return a fused read-modify-write
 * opcode that operates directly on a membase destination (e.g. x86's
 * add [mem], reg / add [mem], imm forms), avoiding a separate load+op+store.
 *   Only x86 and amd64 provide such forms; each target first checks that the
 * store width is one it can fuse. Presumably returns a sentinel (-1) when no
 * fused form exists — the default path is outside this decimated view.
 */
13914 op_to_op_dest_membase (int store_opcode, int opcode)
13916 #if defined(TARGET_X86)
13917 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13922 return OP_X86_ADD_MEMBASE_REG;
13924 return OP_X86_SUB_MEMBASE_REG;
13926 return OP_X86_AND_MEMBASE_REG;
13928 return OP_X86_OR_MEMBASE_REG;
13930 return OP_X86_XOR_MEMBASE_REG;
13933 return OP_X86_ADD_MEMBASE_IMM;
13936 return OP_X86_SUB_MEMBASE_IMM;
13939 return OP_X86_AND_MEMBASE_IMM;
13942 return OP_X86_OR_MEMBASE_IMM;
13945 return OP_X86_XOR_MEMBASE_IMM;
13951 #if defined(TARGET_AMD64)
13952 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit forms reuse the X86_* opcodes on amd64 */
13957 return OP_X86_ADD_MEMBASE_REG;
13959 return OP_X86_SUB_MEMBASE_REG;
13961 return OP_X86_AND_MEMBASE_REG;
13963 return OP_X86_OR_MEMBASE_REG;
13965 return OP_X86_XOR_MEMBASE_REG;
13967 return OP_X86_ADD_MEMBASE_IMM;
13969 return OP_X86_SUB_MEMBASE_IMM;
13971 return OP_X86_AND_MEMBASE_IMM;
13973 return OP_X86_OR_MEMBASE_IMM;
13975 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit forms use the AMD64_* opcodes */
13977 return OP_AMD64_ADD_MEMBASE_REG;
13979 return OP_AMD64_SUB_MEMBASE_REG;
13981 return OP_AMD64_AND_MEMBASE_REG;
13983 return OP_AMD64_OR_MEMBASE_REG;
13985 return OP_AMD64_XOR_MEMBASE_REG;
13988 return OP_AMD64_ADD_MEMBASE_IMM;
13991 return OP_AMD64_SUB_MEMBASE_IMM;
13994 return OP_AMD64_AND_MEMBASE_IMM;
13997 return OP_AMD64_OR_MEMBASE_IMM;
14000 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materialization into a direct byte store: when the
 * store is a 1-byte membase store (OP_STOREI1_MEMBASE_REG), emit the x86
 * setcc-to-memory forms (SETEQ/SETNE) instead of setcc-to-reg plus store.
 * x86/amd64 only.
 */
14010 op_to_op_store_membase (int store_opcode, int opcode)
14012 #if defined(TARGET_X86) || defined(TARGET_AMD64)
14015 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14016 return OP_X86_SETEQ_MEMBASE;
14018 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14019 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the opcode of a load feeding sreg1 and the consuming opcode, return
 * a fused opcode that reads its first operand directly from memory
 * (push [mem], cmp [mem], ...), eliminating the separate load. x86/amd64 only.
 *   On amd64 the choice between 32-bit (ICOMPARE) and 64-bit (COMPARE)
 * memory forms depends on the load width and on ilp32 (x32 ABI) mode.
 */
14027 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
14030 /* FIXME: This has sign extension issues */
14032 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14033 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-width / 32-bit loads can be fused on x86 */
14036 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14041 return OP_X86_PUSH_MEMBASE;
14042 case OP_COMPARE_IMM:
14043 case OP_ICOMPARE_IMM:
14044 return OP_X86_COMPARE_MEMBASE_IMM;
14047 return OP_X86_COMPARE_MEMBASE_REG;
14051 #ifdef TARGET_AMD64
14052 /* FIXME: This has sign extension issues */
14054 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14055 return OP_X86_COMPARE_MEMBASE8_IMM;
14060 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14061 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare fusions below are intentionally commented out
 * upstream (only valid for 32-bit immediates) */
14063 /* FIXME: This only works for 32 bit immediates
14064 case OP_COMPARE_IMM:
14065 case OP_LCOMPARE_IMM:
14066 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14067 return OP_AMD64_COMPARE_MEMBASE_IMM;
14069 case OP_ICOMPARE_IMM:
14070 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14071 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
14075 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14076 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14077 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14078 return OP_AMD64_COMPARE_MEMBASE_REG;
14081 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14082 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given the opcode of a load feeding sreg2 and the consuming opcode, return
 * a fused reg,[mem] opcode (cmp/add/sub/and/or/xor with a memory second
 * operand), eliminating the separate load. x86/amd64 only.
 *   On amd64 the 32-bit branch (REG_MEMBASE via X86_* opcodes, plus
 * AMD64_ICOMPARE) is selected for 4-byte loads or pointer loads under ilp32;
 * the 64-bit branch uses the AMD64_* opcodes.
 */
14091 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
14094 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14100 return OP_X86_COMPARE_REG_MEMBASE;
14102 return OP_X86_ADD_REG_MEMBASE;
14104 return OP_X86_SUB_REG_MEMBASE;
14106 return OP_X86_AND_REG_MEMBASE;
14108 return OP_X86_OR_REG_MEMBASE;
14110 return OP_X86_XOR_REG_MEMBASE;
14114 #ifdef TARGET_AMD64
14115 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14118 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14120 return OP_X86_ADD_REG_MEMBASE;
14122 return OP_X86_SUB_REG_MEMBASE;
14124 return OP_X86_AND_REG_MEMBASE;
14126 return OP_X86_OR_REG_MEMBASE;
14128 return OP_X86_XOR_REG_MEMBASE;
14130 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14134 return OP_AMD64_COMPARE_REG_MEMBASE;
14136 return OP_AMD64_ADD_REG_MEMBASE;
14138 return OP_AMD64_SUB_REG_MEMBASE;
14140 return OP_AMD64_AND_REG_MEMBASE;
14142 return OP_AMD64_OR_REG_MEMBASE;
14144 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but filters out opcodes that this architecture
 * emulates in software (long shifts on 32-bit targets without native support,
 * and mul/div/rem under MONO_ARCH_EMULATE_*), since the emulation helpers
 * take register operands only. Falls through to mono_op_to_op_imm () for
 * everything else. (The excluded case labels are outside this decimated view.)
 */
14153 mono_op_to_op_imm_noemul (int opcode)
14156 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14162 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14169 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14174 return mono_op_to_op_imm (opcode);
14179 * mono_handle_global_vregs:
14181 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14185 mono_handle_global_vregs (MonoCompile *cfg)
14187 gint32 *vreg_to_bb;
14188 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] encodes usage: 0 = unseen, block_num + 1 = seen in
 * exactly one bblock, -1 = seen in more than one bblock.
 * NOTE(review): the element type is gint32 but the size is computed with
 * sizeof (gint32*), and the "+ 1" binds outside the multiplication — this
 * over-allocates on LP64 while adding only one extra byte rather than one
 * extra element; presumably "sizeof (gint32) * (cfg->next_vreg + 1)" was
 * intended — confirm against upstream.
 */
14191 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14193 #ifdef MONO_ARCH_SIMD_INTRINSICS
14194 if (cfg->uses_simd_intrinsics)
14195 mono_simd_simplify_indirection (cfg);
14198 /* Find local vregs used in more than one bb */
14199 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14200 MonoInst *ins = bb->code;
14201 int block_num = bb->block_num;
14203 if (cfg->verbose_level > 2)
14204 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14207 for (; ins; ins = ins->next) {
14208 const char *spec = INS_INFO (ins->opcode);
14209 int regtype = 0, regindex;
14212 if (G_UNLIKELY (cfg->verbose_level > 2))
14213 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
14215 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn, skipping unused slots */
14217 for (regindex = 0; regindex < 4; regindex ++) {
14220 if (regindex == 0) {
14221 regtype = spec [MONO_INST_DEST];
14222 if (regtype == ' ')
14225 } else if (regindex == 1) {
14226 regtype = spec [MONO_INST_SRC1];
14227 if (regtype == ' ')
14230 } else if (regindex == 2) {
14231 regtype = spec [MONO_INST_SRC2];
14232 if (regtype == ' ')
14235 } else if (regindex == 3) {
14236 regtype = spec [MONO_INST_SRC3];
14237 if (regtype == ' ')
14242 #if SIZEOF_REGISTER == 4
14243 /* In the LLVM case, the long opcodes are not decomposed */
14244 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14246 * Since some instructions reference the original long vreg,
14247 * and some reference the two component vregs, it is quite hard
14248 * to determine when it needs to be global. So be conservative.
14250 if (!get_vreg_to_inst (cfg, vreg)) {
14251 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14253 if (cfg->verbose_level > 2)
14254 printf ("LONG VREG R%d made global.\n", vreg);
14258 * Make the component vregs volatile since the optimizations can
14259 * get confused otherwise.
14261 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14262 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14266 g_assert (vreg != -1);
14268 prev_bb = vreg_to_bb [vreg];
14269 if (prev_bb == 0) {
14270 /* 0 is a valid block num */
14271 vreg_to_bb [vreg] = block_num + 1;
14272 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global; skip them */
14273 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* used in a second bblock: promote to a MonoInst variable */
14276 if (!get_vreg_to_inst (cfg, vreg)) {
14277 if (G_UNLIKELY (cfg->verbose_level > 2))
14278 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
14282 if (vreg_is_ref (cfg, vreg))
14283 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14285 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14288 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14291 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14294 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14297 g_assert_not_reached ();
14301 /* Flag as having been used in more than one bb */
14302 vreg_to_bb [vreg] = -1;
14308 /* If a variable is used in only one bblock, convert it into a local vreg */
14309 for (i = 0; i < cfg->num_varinfo; i++) {
14310 MonoInst *var = cfg->varinfo [i];
14311 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14313 switch (var->type) {
14319 #if SIZEOF_REGISTER == 8
14322 #if !defined(TARGET_X86)
14323 /* Enabling this screws up the fp stack on x86 */
14326 if (mono_arch_is_soft_float ())
14330 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14334 /* Arguments are implicitly global */
14335 /* Putting R4 vars into registers doesn't work currently */
14336 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14337 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14339 * Make that the variable's liveness interval doesn't contain a call, since
14340 * that would cause the lvreg to be spilled, making the whole optimization
14343 /* This is too slow for JIT compilation */
14345 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14347 int def_index, call_index, ins_index;
14348 gboolean spilled = FALSE;
14353 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14354 const char *spec = INS_INFO (ins->opcode);
14356 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14357 def_index = ins_index;
/*
 * NOTE(review): both disjuncts below test SRC1/sreg1 — they are
 * identical, so the second one is dead. It was presumably meant
 * to test spec [MONO_INST_SRC2] / ins->sreg2, which would make
 * uses through the second source register also count — confirm
 * against upstream before changing.
 */
14359 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14360 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14361 if (call_index > def_index) {
14367 if (MONO_IS_CALL (ins))
14368 call_index = ins_index;
14378 if (G_UNLIKELY (cfg->verbose_level > 2))
14379 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: the variable lives only inside one bblock */
14380 var->flags |= MONO_INST_IS_DEAD;
14381 cfg->vreg_to_inst [var->dreg] = NULL;
14388 * Compress the varinfo and vars tables so the liveness computation is faster and
14389 * takes up less space.
14392 for (i = 0; i < cfg->num_varinfo; ++i) {
14393 MonoInst *var = cfg->varinfo [i];
14394 if (pos < i && cfg->locals_start == i)
14395 cfg->locals_start = pos;
14396 if (!(var->flags & MONO_INST_IS_DEAD)) {
14398 cfg->varinfo [pos] = cfg->varinfo [i];
14399 cfg->varinfo [pos]->inst_c0 = pos;
14400 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14401 cfg->vars [pos].idx = pos;
14402 #if SIZEOF_REGISTER == 4
14403 if (cfg->varinfo [pos]->type == STACK_I8) {
14404 /* Modify the two component vars too */
14407 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14408 var1->inst_c0 = pos;
14409 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14410 var1->inst_c0 = pos;
14417 cfg->num_varinfo = pos;
14418 if (cfg->locals_start > cfg->num_varinfo)
14419 cfg->locals_start = cfg->num_varinfo;
14423 * mono_allocate_gsharedvt_vars:
14425 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14426 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14429 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* one mapping slot per vreg; 0 means "not a gsharedvt var" (mempool memory
 * is zero-initialized), hence the idx + 1 / -1 encodings below */
14433 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14435 for (i = 0; i < cfg->num_varinfo; ++i) {
14436 MonoInst *ins = cfg->varinfo [i];
14439 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14440 if (i >= cfg->locals_start) {
/* a local: reserve a runtime-info slot holding its frame offset */
14442 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14443 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14444 ins->opcode = OP_GSHAREDVT_LOCAL;
14445 ins->inst_imm = idx;
/* an argument: marked -1, addressed via its own reg+offset instead */
14448 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14449 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14456 * mono_spill_global_vars:
14458 * Generate spill code for variables which are not allocated to registers,
14459 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14460 * code is generated which could be optimized by the local optimization passes.
14463 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14465 MonoBasicBlock *bb;
14467 int orig_next_vreg;
14468 guint32 *vreg_to_lvreg;
14470 guint32 i, lvregs_len;
14471 gboolean dest_has_lvreg = FALSE;
14472 MonoStackType stacktypes [128];
14473 MonoInst **live_range_start, **live_range_end;
14474 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14476 *need_local_opts = FALSE;
14478 memset (spec2, 0, sizeof (spec2));
14480 /* FIXME: Move this function to mini.c */
14481 stacktypes ['i'] = STACK_PTR;
14482 stacktypes ['l'] = STACK_I8;
14483 stacktypes ['f'] = STACK_R8;
14484 #ifdef MONO_ARCH_SIMD_INTRINSICS
14485 stacktypes ['x'] = STACK_VTYPE;
14488 #if SIZEOF_REGISTER == 4
14489 /* Create MonoInsts for longs */
14490 for (i = 0; i < cfg->num_varinfo; i++) {
14491 MonoInst *ins = cfg->varinfo [i];
14493 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14494 switch (ins->type) {
14499 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14502 g_assert (ins->opcode == OP_REGOFFSET);
14504 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14506 tree->opcode = OP_REGOFFSET;
14507 tree->inst_basereg = ins->inst_basereg;
14508 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14510 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14512 tree->opcode = OP_REGOFFSET;
14513 tree->inst_basereg = ins->inst_basereg;
14514 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14524 if (cfg->compute_gc_maps) {
14525 /* registers need liveness info even for !non refs */
14526 for (i = 0; i < cfg->num_varinfo; i++) {
14527 MonoInst *ins = cfg->varinfo [i];
14529 if (ins->opcode == OP_REGVAR)
14530 ins->flags |= MONO_INST_GC_TRACK;
14534 /* FIXME: widening and truncation */
14537 * As an optimization, when a variable allocated to the stack is first loaded into
14538 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14539 * the variable again.
14541 orig_next_vreg = cfg->next_vreg;
14542 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14543 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14547 * These arrays contain the first and last instructions accessing a given
14549 * Since we emit bblocks in the same order we process them here, and we
14550 * don't split live ranges, these will precisely describe the live range of
14551 * the variable, i.e. the instruction range where a valid value can be found
14552 * in the variables location.
14553 * The live range is computed using the liveness info computed by the liveness pass.
14554 * We can't use vmv->range, since that is an abstract live range, and we need
14555 * one which is instruction precise.
14556 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14558 /* FIXME: Only do this if debugging info is requested */
14559 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14560 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14561 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14562 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14564 /* Add spill loads/stores */
14565 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14568 if (cfg->verbose_level > 2)
14569 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14571 /* Clear vreg_to_lvreg array */
14572 for (i = 0; i < lvregs_len; i++)
14573 vreg_to_lvreg [lvregs [i]] = 0;
14577 MONO_BB_FOR_EACH_INS (bb, ins) {
14578 const char *spec = INS_INFO (ins->opcode);
14579 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14580 gboolean store, no_lvreg;
14581 int sregs [MONO_MAX_SRC_REGS];
14583 if (G_UNLIKELY (cfg->verbose_level > 2))
14584 mono_print_ins (ins);
14586 if (ins->opcode == OP_NOP)
14590 * We handle LDADDR here as well, since it can only be decomposed
14591 * when variable addresses are known.
14593 if (ins->opcode == OP_LDADDR) {
14594 MonoInst *var = (MonoInst *)ins->inst_p0;
14596 if (var->opcode == OP_VTARG_ADDR) {
14597 /* Happens on SPARC/S390 where vtypes are passed by reference */
14598 MonoInst *vtaddr = var->inst_left;
14599 if (vtaddr->opcode == OP_REGVAR) {
14600 ins->opcode = OP_MOVE;
14601 ins->sreg1 = vtaddr->dreg;
14603 else if (var->inst_left->opcode == OP_REGOFFSET) {
14604 ins->opcode = OP_LOAD_MEMBASE;
14605 ins->inst_basereg = vtaddr->inst_basereg;
14606 ins->inst_offset = vtaddr->inst_offset;
14609 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14610 /* gsharedvt arg passed by ref */
14611 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14613 ins->opcode = OP_LOAD_MEMBASE;
14614 ins->inst_basereg = var->inst_basereg;
14615 ins->inst_offset = var->inst_offset;
14616 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14617 MonoInst *load, *load2, *load3;
14618 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14619 int reg1, reg2, reg3;
14620 MonoInst *info_var = cfg->gsharedvt_info_var;
14621 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14625 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14628 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14630 g_assert (info_var);
14631 g_assert (locals_var);
14633 /* Mark the instruction used to compute the locals var as used */
14634 cfg->gsharedvt_locals_var_ins = NULL;
14636 /* Load the offset */
14637 if (info_var->opcode == OP_REGOFFSET) {
14638 reg1 = alloc_ireg (cfg);
14639 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14640 } else if (info_var->opcode == OP_REGVAR) {
14642 reg1 = info_var->dreg;
14644 g_assert_not_reached ();
14646 reg2 = alloc_ireg (cfg);
14647 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14648 /* Load the locals area address */
14649 reg3 = alloc_ireg (cfg);
14650 if (locals_var->opcode == OP_REGOFFSET) {
14651 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14652 } else if (locals_var->opcode == OP_REGVAR) {
14653 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14655 g_assert_not_reached ();
14657 /* Compute the address */
14658 ins->opcode = OP_PADD;
14662 mono_bblock_insert_before_ins (bb, ins, load3);
14663 mono_bblock_insert_before_ins (bb, load3, load2);
14665 mono_bblock_insert_before_ins (bb, load2, load);
14667 g_assert (var->opcode == OP_REGOFFSET);
14669 ins->opcode = OP_ADD_IMM;
14670 ins->sreg1 = var->inst_basereg;
14671 ins->inst_imm = var->inst_offset;
14674 *need_local_opts = TRUE;
14675 spec = INS_INFO (ins->opcode);
14678 if (ins->opcode < MONO_CEE_LAST) {
14679 mono_print_ins (ins);
14680 g_assert_not_reached ();
14684 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14688 if (MONO_IS_STORE_MEMBASE (ins)) {
14689 tmp_reg = ins->dreg;
14690 ins->dreg = ins->sreg2;
14691 ins->sreg2 = tmp_reg;
14694 spec2 [MONO_INST_DEST] = ' ';
14695 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14696 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14697 spec2 [MONO_INST_SRC3] = ' ';
14699 } else if (MONO_IS_STORE_MEMINDEX (ins))
14700 g_assert_not_reached ();
14705 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14706 printf ("\t %.3s %d", spec, ins->dreg);
14707 num_sregs = mono_inst_get_src_registers (ins, sregs);
14708 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14709 printf (" %d", sregs [srcindex]);
14716 regtype = spec [MONO_INST_DEST];
14717 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14720 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14721 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14722 MonoInst *store_ins;
14724 MonoInst *def_ins = ins;
14725 int dreg = ins->dreg; /* The original vreg */
14727 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14729 if (var->opcode == OP_REGVAR) {
14730 ins->dreg = var->dreg;
14731 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14733 * Instead of emitting a load+store, use a _membase opcode.
14735 g_assert (var->opcode == OP_REGOFFSET);
14736 if (ins->opcode == OP_MOVE) {
14740 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14741 ins->inst_basereg = var->inst_basereg;
14742 ins->inst_offset = var->inst_offset;
14745 spec = INS_INFO (ins->opcode);
14749 g_assert (var->opcode == OP_REGOFFSET);
14751 prev_dreg = ins->dreg;
14753 /* Invalidate any previous lvreg for this vreg */
14754 vreg_to_lvreg [ins->dreg] = 0;
14758 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14760 store_opcode = OP_STOREI8_MEMBASE_REG;
14763 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14765 #if SIZEOF_REGISTER != 8
14766 if (regtype == 'l') {
14767 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14768 mono_bblock_insert_after_ins (bb, ins, store_ins);
14769 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14770 mono_bblock_insert_after_ins (bb, ins, store_ins);
14771 def_ins = store_ins;
14776 g_assert (store_opcode != OP_STOREV_MEMBASE);
14778 /* Try to fuse the store into the instruction itself */
14779 /* FIXME: Add more instructions */
14780 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14781 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14782 ins->inst_imm = ins->inst_c0;
14783 ins->inst_destbasereg = var->inst_basereg;
14784 ins->inst_offset = var->inst_offset;
14785 spec = INS_INFO (ins->opcode);
14786 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14787 ins->opcode = store_opcode;
14788 ins->inst_destbasereg = var->inst_basereg;
14789 ins->inst_offset = var->inst_offset;
14793 tmp_reg = ins->dreg;
14794 ins->dreg = ins->sreg2;
14795 ins->sreg2 = tmp_reg;
14798 spec2 [MONO_INST_DEST] = ' ';
14799 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14800 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14801 spec2 [MONO_INST_SRC3] = ' ';
14803 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14804 // FIXME: The backends expect the base reg to be in inst_basereg
14805 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14807 ins->inst_basereg = var->inst_basereg;
14808 ins->inst_offset = var->inst_offset;
14809 spec = INS_INFO (ins->opcode);
14811 /* printf ("INS: "); mono_print_ins (ins); */
14812 /* Create a store instruction */
14813 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14815 /* Insert it after the instruction */
14816 mono_bblock_insert_after_ins (bb, ins, store_ins);
14818 def_ins = store_ins;
14821 * We can't assign ins->dreg to var->dreg here, since the
14822 * sregs could use it. So set a flag, and do it after
14825 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14826 dest_has_lvreg = TRUE;
14831 if (def_ins && !live_range_start [dreg]) {
14832 live_range_start [dreg] = def_ins;
14833 live_range_start_bb [dreg] = bb;
14836 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14839 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14840 tmp->inst_c1 = dreg;
14841 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14848 num_sregs = mono_inst_get_src_registers (ins, sregs);
14849 for (srcindex = 0; srcindex < 3; ++srcindex) {
14850 regtype = spec [MONO_INST_SRC1 + srcindex];
14851 sreg = sregs [srcindex];
14853 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14854 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14855 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14856 MonoInst *use_ins = ins;
14857 MonoInst *load_ins;
14858 guint32 load_opcode;
14860 if (var->opcode == OP_REGVAR) {
14861 sregs [srcindex] = var->dreg;
14862 //mono_inst_set_src_registers (ins, sregs);
14863 live_range_end [sreg] = use_ins;
14864 live_range_end_bb [sreg] = bb;
14866 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14869 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14870 /* var->dreg is a hreg */
14871 tmp->inst_c1 = sreg;
14872 mono_bblock_insert_after_ins (bb, ins, tmp);
14878 g_assert (var->opcode == OP_REGOFFSET);
14880 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14882 g_assert (load_opcode != OP_LOADV_MEMBASE);
14884 if (vreg_to_lvreg [sreg]) {
14885 g_assert (vreg_to_lvreg [sreg] != -1);
14887 /* The variable is already loaded to an lvreg */
14888 if (G_UNLIKELY (cfg->verbose_level > 2))
14889 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14890 sregs [srcindex] = vreg_to_lvreg [sreg];
14891 //mono_inst_set_src_registers (ins, sregs);
14895 /* Try to fuse the load into the instruction */
14896 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14897 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14898 sregs [0] = var->inst_basereg;
14899 //mono_inst_set_src_registers (ins, sregs);
14900 ins->inst_offset = var->inst_offset;
14901 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14902 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14903 sregs [1] = var->inst_basereg;
14904 //mono_inst_set_src_registers (ins, sregs);
14905 ins->inst_offset = var->inst_offset;
14907 if (MONO_IS_REAL_MOVE (ins)) {
14908 ins->opcode = OP_NOP;
14911 //printf ("%d ", srcindex); mono_print_ins (ins);
14913 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14915 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14916 if (var->dreg == prev_dreg) {
14918 * sreg refers to the value loaded by the load
14919 * emitted below, but we need to use ins->dreg
14920 * since it refers to the store emitted earlier.
14924 g_assert (sreg != -1);
14925 vreg_to_lvreg [var->dreg] = sreg;
14926 g_assert (lvregs_len < 1024);
14927 lvregs [lvregs_len ++] = var->dreg;
14931 sregs [srcindex] = sreg;
14932 //mono_inst_set_src_registers (ins, sregs);
14934 #if SIZEOF_REGISTER != 8
14935 if (regtype == 'l') {
14936 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14937 mono_bblock_insert_before_ins (bb, ins, load_ins);
14938 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14939 mono_bblock_insert_before_ins (bb, ins, load_ins);
14940 use_ins = load_ins;
14945 #if SIZEOF_REGISTER == 4
14946 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14948 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14949 mono_bblock_insert_before_ins (bb, ins, load_ins);
14950 use_ins = load_ins;
14954 if (var->dreg < orig_next_vreg) {
14955 live_range_end [var->dreg] = use_ins;
14956 live_range_end_bb [var->dreg] = bb;
14959 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14962 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14963 tmp->inst_c1 = var->dreg;
14964 mono_bblock_insert_after_ins (bb, ins, tmp);
14968 mono_inst_set_src_registers (ins, sregs);
14970 if (dest_has_lvreg) {
14971 g_assert (ins->dreg != -1);
14972 vreg_to_lvreg [prev_dreg] = ins->dreg;
14973 g_assert (lvregs_len < 1024);
14974 lvregs [lvregs_len ++] = prev_dreg;
14975 dest_has_lvreg = FALSE;
14979 tmp_reg = ins->dreg;
14980 ins->dreg = ins->sreg2;
14981 ins->sreg2 = tmp_reg;
14984 if (MONO_IS_CALL (ins)) {
14985 /* Clear vreg_to_lvreg array */
14986 for (i = 0; i < lvregs_len; i++)
14987 vreg_to_lvreg [lvregs [i]] = 0;
14989 } else if (ins->opcode == OP_NOP) {
14991 MONO_INST_NULLIFY_SREGS (ins);
14994 if (cfg->verbose_level > 2)
14995 mono_print_ins_index (1, ins);
14998 /* Extend the live range based on the liveness info */
14999 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
15000 for (i = 0; i < cfg->num_varinfo; i ++) {
15001 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
15003 if (vreg_is_volatile (cfg, vi->vreg))
15004 /* The liveness info is incomplete */
15007 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
15008 /* Live from at least the first ins of this bb */
15009 live_range_start [vi->vreg] = bb->code;
15010 live_range_start_bb [vi->vreg] = bb;
15013 if (mono_bitset_test_fast (bb->live_out_set, i)) {
15014 /* Live at least until the last ins of this bb */
15015 live_range_end [vi->vreg] = bb->last_ins;
15016 live_range_end_bb [vi->vreg] = bb;
15023 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
15024 * by storing the current native offset into MonoMethodVar->live_range_start/end.
15026 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
15027 for (i = 0; i < cfg->num_varinfo; ++i) {
15028 int vreg = MONO_VARINFO (cfg, i)->vreg;
15031 if (live_range_start [vreg]) {
15032 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
15034 ins->inst_c1 = vreg;
15035 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
15037 if (live_range_end [vreg]) {
15038 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
15040 ins->inst_c1 = vreg;
15041 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
15042 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
15044 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15049 if (cfg->gsharedvt_locals_var_ins) {
15050 /* Nullify if unused */
15051 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15052 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15055 g_free (live_range_start);
15056 g_free (live_range_end);
15057 g_free (live_range_start_bb);
15058 g_free (live_range_end_bb);
15062 mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
15064 MonoInst *ret, *move, *source;
15065 MonoClass *klass = ins->klass;
15066 int context_used = mini_class_check_context_used (cfg, klass);
15067 int is_isinst = ins->opcode == OP_ISINST;
15068 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
15069 source = get_vreg_to_inst (cfg, ins->sreg1);
15070 if (!source || source == (MonoInst *) -1)
15071 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15072 g_assert (source && source != (MonoInst *) -1);
15074 MonoBasicBlock *first_bb;
15075 NEW_BBLOCK (cfg, first_bb);
15076 cfg->cbb = first_bb;
15078 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
15080 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15082 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
15083 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
15084 MonoInst *iargs [1];
15087 iargs [0] = source;
15089 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15090 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15092 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
15093 save_cast_details (cfg, klass, source->dreg, TRUE);
15094 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15095 reset_cast_details (cfg);
15097 g_assert (costs > 0);
15101 ret = handle_isinst (cfg, klass, source, context_used);
15103 ret = handle_castclass (cfg, klass, source, context_used);
15105 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
15107 g_assert (cfg->cbb->code || first_bb->code);
15108 MonoInst *prev = ins->prev;
15109 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
15113 mono_decompose_typechecks (MonoCompile *cfg)
15115 for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15117 MONO_BB_FOR_EACH_INS (bb, ins) {
15118 switch (ins->opcode) {
15121 mono_decompose_typecheck (cfg, bb, ins);
15131 * - use 'iadd' instead of 'int_add'
15132 * - handling ovf opcodes: decompose in method_to_ir.
15133 * - unify iregs/fregs
15134 * -> partly done, the missing parts are:
15135 * - a more complete unification would involve unifying the hregs as well, so
15136 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15137 * would no longer map to the machine hregs, so the code generators would need to
15138 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15139 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15140 * fp/non-fp branches speeds it up by about 15%.
15141 * - use sext/zext opcodes instead of shifts
15143 * - get rid of TEMPLOADs if possible and use vregs instead
15144 * - clean up usage of OP_P/OP_ opcodes
15145 * - cleanup usage of DUMMY_USE
15146 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15148 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15149 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15150 * - make sure handle_stack_args () is called before the branch is emitted
15151 * - when the new IR is done, get rid of all unused stuff
15152 * - COMPARE/BEQ as separate instructions or unify them ?
15153 * - keeping them separate allows specialized compare instructions like
15154 * compare_imm, compare_membase
15155 * - most back ends unify fp compare+branch, fp compare+ceq
15156 * - integrate mono_save_args into inline_method
15157 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15158 * - handle long shift opts on 32 bit platforms somehow: they require
15159 * 3 sregs (2 for arg1 and 1 for arg2)
15160 * - make byref a 'normal' type.
15161 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15162 * variable if needed.
15163 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15164 * like inline_method.
15165 * - remove inlining restrictions
15166 * - fix LNEG and enable cfold of INEG
15167 * - generalize x86 optimizations like ldelema as a peephole optimization
15168 * - add store_mem_imm for amd64
15169 * - optimize the loading of the interruption flag in the managed->native wrappers
15170 * - avoid special handling of OP_NOP in passes
15171 * - move code inserting instructions into one function/macro.
15172 * - try a coalescing phase after liveness analysis
15173 * - add float -> vreg conversion + local optimizations on !x86
15174 * - figure out how to handle decomposed branches during optimizations, ie.
15175 * compare+branch, op_jump_table+op_br etc.
15176 * - promote RuntimeXHandles to vregs
15177 * - vtype cleanups:
15178 * - add a NEW_VARLOADA_VREG macro
15179 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15180 * accessing vtype fields.
15181 * - get rid of I8CONST on 64 bit platforms
15182 * - dealing with the increase in code size due to branches created during opcode
15184 * - use extended basic blocks
15185 * - all parts of the JIT
15186 * - handle_global_vregs () && local regalloc
15187 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15188 * - sources of increase in code size:
15191 * - isinst and castclass
15192 * - lvregs not allocated to global registers even if used multiple times
15193 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15195 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15196 * - add all micro optimizations from the old JIT
15197 * - put tree optimizations into the deadce pass
15198 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15199 * specific function.
15200 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15201 * fcompare + branchCC.
15202 * - create a helper function for allocating a stack slot, taking into account
15203 * MONO_CFG_HAS_SPILLUP.
15205 * - merge the ia64 switch changes.
15206 * - optimize mono_regstate2_alloc_int/float.
15207 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15208 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15209 * parts of the tree could be separated by other instructions, killing the tree
15210 * arguments, or stores killing loads etc. Also, should we fold loads into other
15211 * instructions if the result of the load is used multiple times ?
15212 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15213 * - LAST MERGE: 108395.
15214 * - when returning vtypes in registers, generate IR and append it to the end of the
15215 * last bb instead of doing it in the epilog.
15216 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15224 - When to decompose opcodes:
15225 - earlier: this makes some optimizations hard to implement, since the low level IR
15226 no longer contains the necessary information. But it is easier to do.
15227 - later: harder to implement, enables more optimizations.
15228 - Branches inside bblocks:
15229 - created when decomposing complex opcodes.
15230 - branches to another bblock: harmless, but not tracked by the branch
15231 optimizations, so need to branch to a label at the start of the bblock.
15232 - branches to inside the same bblock: very problematic, trips up the local
15233 reg allocator. Can be fixed by splitting the current bblock, but that is a
15234 complex operation, since some local vregs can become global vregs etc.
15235 - Local/global vregs:
15236 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15237 local register allocator.
15238 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15239 structure, created by mono_create_var (). Assigned to hregs or the stack by
15240 the global register allocator.
15241 - When to do optimizations like alu->alu_imm:
15242 - earlier -> saves work later on since the IR will be smaller/simpler
15243 - later -> can work on more instructions
15244 - Handling of valuetypes:
15245 - When a vtype is pushed on the stack, a new temporary is created, an
15246 instruction computing its address (LDADDR) is emitted and pushed on
15247 the stack. Need to optimize cases when the vtype is used immediately as in
15248 argument passing, stloc etc.
15249 - Instead of the to_end stuff in the old JIT, simply call the function handling
15250 the values on the stack before emitting the last instruction of the bb.
15253 #endif /* DISABLE_JIT */